diff --git a/.editorconfig b/.editorconfig index 47fde53b690b5b86037ede8cbf0337db8a472d7d..2b40ec32fac3e935b4f85e70bf339224d5e8f8b0 100644 --- a/.editorconfig +++ b/.editorconfig @@ -14,3 +14,9 @@ indent_style=space indent_size=2 tab_width=8 end_of_line=lf + +[*.sh] +indent_style=space +indent_size=2 +tab_width=8 +end_of_line=lf diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md new file mode 100644 index 0000000000000000000000000000000000000000..6067dbf12fa70ab43a3065e59e7e68375b721049 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/release.md @@ -0,0 +1,9 @@ +--- +title: Release failure for {{ ref }} +--- + +Pipeline for release {{ ref }} failed. Please investigate. + +If the pipeline has failed before pushing to crates.io, delete the release tag +and fix the release as necessary, retagging after complete. If the pipeline has +failed after pushing to crates.io, create a new tag incrementing the version. diff --git a/.github/workflows/check-gitlab-pipeline.yml b/.github/workflows/check-gitlab-pipeline.yml new file mode 100644 index 0000000000000000000000000000000000000000..c87f17c2f732e447cacf297cfe41e7b266a4bc99 --- /dev/null +++ b/.github/workflows/check-gitlab-pipeline.yml @@ -0,0 +1,30 @@ +# A github action to track the status of the gitlab pipeline for tagged +# releases, and cancel the release/create a new issue if it fails + +name: Monitor gitlab pipeline status + +on: + push: + tags: + - v* + - ci-release-* + +jobs: + monitor: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Monitor pipeline + run: env; ./.maintain/github/check_gitlab_pipeline.sh + id: monitor_pipeline + env: + TAGGER: ${{ github.event.pusher.name }} + - name: Create Issue + if: failure() + uses: JasonEtco/create-an-issue@v2 + with: + filename: .github/ISSUE_TEMPLATE/release.md + assignees: ${{ github.event.pusher.name }} + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index aadaa13912c19dbc056245784442d8b4c9de2a82..6398c09fe796278130978c2a8598ee9270399388 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,5 @@ rls*.log *.rej **/wip/*.stderr .local +**/hfuzz_target/ +**/hfuzz_workspace/ diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 98cf4dd1d8c4f6109f2402ee44c890405dbdf968..2c141c51d129617d06ad73eeecb7d652a1a876fa 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,6 +24,7 @@ stages: - test - build + - post-build-test - publish - kubernetes - flaming-fir @@ -31,15 +32,15 @@ stages: variables: GIT_STRATEGY: fetch GIT_DEPTH: 100 - CARGO_HOME: "/ci-cache/${CI_PROJECT_NAME}/cargo/${CI_JOB_NAME}" + CARGO_HOME: "/ci-cache/${CI_PROJECT_NAME}/cargo/${CI_COMMIT_REF_NAME}/${CI_JOB_NAME}" SCCACHE_DIR: "/ci-cache/${CI_PROJECT_NAME}/sccache" CARGO_INCREMENTAL: 0 CI_SERVER_NAME: "GitLab CI" DOCKER_OS: "debian:stretch" ARCH: "x86_64" # FIXME set to release - CARGO_UNLEASH_INSTALL_PARAMS: "--version 1.0.0-alpha.8" - CARGO_UNLEASH_PKG_DEF: "--skip node node-* pallet-template pallet-example pallet-example-* subkey chain-spec-builder" + CARGO_UNLEASH_INSTALL_PARAMS: "--version 1.0.0-alpha.10" + CARGO_UNLEASH_PKG_DEF: "--skip node node-* pallet-template pallet-example pallet-example-* subkey chain-spec-builder sp-arithmetic-fuzzer" .collect-artifacts: &collect-artifacts @@ -127,6 +128,17 @@ check-line-width: allow_failure: true + + +check-polkadot-companion-build: + stage: build + <<: *docker-env + script: + - ./.maintain/gitlab/check_polkadot_companion_build.sh + interruptible: true + allow_failure: true + + cargo-audit: stage: 
test <<: *docker-env @@ -136,6 +148,20 @@ cargo-audit: - cargo audit allow_failure: true +cargo-deny: + stage: test + <<: *docker-env + script: + - cargo deny check --hide-inclusion-graph -c .maintain/deny.toml + after_script: + - echo "___The complete log is in the artifacts___" + - cargo deny check -c .maintain/deny.toml 2> deny.log + artifacts: + name: $CI_COMMIT_SHORT_SHA + expire_in: 3 days + when: always + paths: + - deny.log cargo-check-benches: stage: test @@ -167,17 +193,17 @@ test-linux-stable: &test-linux variables: - $DEPLOY_TAG script: - - WASM_BUILD_NO_COLOR=1 time cargo test --all --release --verbose --locked | + - WASM_BUILD_NO_COLOR=1 time cargo test --all --release --verbose --locked |& tee output.log - sccache -s - after_script: - - echo "___Collecting warnings for check_warnings job___" + - echo "____Test job successful, checking for warnings____" - awk '/^warning:/,/^$/ { print }' output.log > ${CI_COMMIT_SHORT_SHA}_warnings.log - artifacts: - name: $CI_COMMIT_SHORT_SHA - expire_in: 3 days - paths: - - ${CI_COMMIT_SHORT_SHA}_warnings.log + - if [ -s ${CI_COMMIT_SHORT_SHA}_warnings.log ]; then + cat ${CI_COMMIT_SHORT_SHA}_warnings.log; + exit 1; + else + echo "___No warnings___"; + fi test-dependency-rules: stage: test @@ -214,6 +240,24 @@ test-frame-staking: - WASM_BUILD_NO_COLOR=1 time cargo test --release --verbose --no-default-features --features std - sccache -s +test-frame-examples-compile-to-wasm: + stage: test + <<: *docker-env + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: -Cdebug-assertions=y + RUST_BACKTRACE: 1 + except: + variables: + - $DEPLOY_TAG + script: + - cd frame/example-offchain-worker/ + - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features + - cd ../example + - cargo +nightly build --target=wasm32-unknown-unknown --no-default-features + - sccache -s + test-wasmtime: stage: test <<: *docker-env @@ -230,6 +274,22 @@ test-wasmtime: - WASM_BUILD_NO_COLOR=1 time cargo test --release --verbose --features wasmtime - sccache -s +test-runtime-benchmarks: + stage: test + <<: *docker-env + variables: + # Enable debug assertions since we are running optimized builds for testing + # but still want to have debug assertions. + RUSTFLAGS: -Cdebug-assertions=y + RUST_BACKTRACE: 1 + except: + variables: + - $DEPLOY_TAG + script: + - cd bin/node/cli + - WASM_BUILD_NO_COLOR=1 time cargo test --release --verbose --features runtime-benchmarks + - sccache -s + test-linux-stable-int: <<: *test-linux except: @@ -271,7 +331,7 @@ check-web-wasm: - time cargo build --target=wasm32-unknown-unknown -p sp-consensus - time cargo build --target=wasm32-unknown-unknown -p sc-telemetry # Note: the command below is a bit weird because several Cargo issues prevent us from compiling the node in a more straight-forward way. 
- - time cargo build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features "browser" --target=wasm32-unknown-unknown + - time cargo +nightly build --manifest-path=bin/node/cli/Cargo.toml --no-default-features --features "browser" --target=wasm32-unknown-unknown -Z features=itarget - sccache -s test-full-crypto-feature: @@ -357,49 +417,16 @@ build-rust-doc-release: - echo "" > ./crate-docs/index.html - sccache -s -check_warnings: - stage: build - <<: *docker-env - except: - variables: - - $DEPLOY_TAG - variables: - GIT_STRATEGY: none - needs: - - job: test-linux-stable - artifacts: true - script: - - if [ -s ${CI_COMMIT_SHORT_SHA}_warnings.log ]; then - cat ${CI_COMMIT_SHORT_SHA}_warnings.log; - exit 1; - else - echo "___No warnings___"; - fi - -# Nightly check whether Polkadot 'master' branch builds. -check_polkadot: - stage: build - <<: *docker-env - allow_failure: true +check-polkadot-companion-status: + stage: post-build-test + image: parity/tools:latest + <<: *kubernetes-build only: - - master - - schedules + - /^[0-9]+$/ script: - - SUBSTRATE_PATH=$(pwd) - # Clone the current Polkadot master branch into ./polkadot. - - git clone --depth 1 https://gitlab.parity.io/parity/polkadot.git - - cd polkadot - # Make sure we override the crates in native and wasm build - - mkdir .cargo - - echo "paths = [ \"$SUBSTRATE_PATH\" ]" > .cargo/config - - mkdir -p target/debug/wbuild/.cargo - - echo "paths = [ \"$SUBSTRATE_PATH\" ]" > target/debug/wbuild/.cargo/config - # package, others are updated along the way. - - cargo update - # Check whether Polkadot 'master' branch builds with this Substrate commit. - - time cargo check - - cd - - - sccache -s + - ./.maintain/gitlab/check_polkadot_companion_status.sh + + trigger-contracts-ci: stage: publish @@ -578,7 +605,7 @@ publish-to-crates-io: <<: *docker-env only: - tags - - /^v[0-9]+\.[0-9]+\.[0-9]+.*$/ + - /^ci-release-.*$/ script: - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS} - cargo unleash em-dragons --no-check ${CARGO_UNLEASH_PKG_DEF} diff --git a/.maintain/Dockerfile b/.maintain/Dockerfile index 2fc1532aa2837b3314eaf17a907ca765d33c85b5..21a41720f7d65ffd41a35cb1fa425e5999c9e289 100644 --- a/.maintain/Dockerfile +++ b/.maintain/Dockerfile @@ -39,6 +39,10 @@ RUN mv /usr/share/ca* /tmp && \ ln -s /substrate/.local/share/substrate /data COPY --from=builder /substrate/target/$PROFILE/substrate /usr/local/bin +COPY --from=builder /substrate/target/$PROFILE/subkey /usr/local/bin +COPY --from=builder /substrate/target/$PROFILE/node-rpc-client /usr/local/bin +COPY --from=builder /substrate/target/$PROFILE/node-template /usr/local/bin +COPY --from=builder /substrate/target/$PROFILE/chain-spec-builder /usr/local/bin # checks RUN ldd /usr/local/bin/substrate && \ diff --git a/.maintain/deny.toml b/.maintain/deny.toml new file mode 100644 index 0000000000000000000000000000000000000000..8cc7635d5049be795e994b0ba740d9273b2f520f --- /dev/null +++ b/.maintain/deny.toml @@ -0,0 +1,193 @@ +# This template contains all of the possible sections and their default values + +# Note that all fields that take a lint level have these possible values: +# * deny - An error will be produced and the check will fail +# * warn - A warning will be produced, but the check will not fail +# * allow - No warning or error will be produced, though in some cases a note +# will be + +# The values provided in this template are the default values that will be used +# when any section or field is not specified in your own configuration + +# If 1 or more 
target triples (and optionally, target_features) are specified, +# only the specified targets will be checked when running `cargo deny check`. +# This means, if a particular package is only ever used as a target specific +# dependency, such as, for example, the `nix` crate only being used via the +# `target_family = "unix"` configuration, that only having windows targets in +# this list would mean the nix crate, as well as any of its exclusive +# dependencies not shared by any other crates, would be ignored, as the target +# list here is effectively saying which targets you are building for. +targets = [ + # The triple can be any string, but only the target triples built in to + # rustc (as of 1.40) can be checked against actual config expressions + #{ triple = "x86_64-unknown-linux-musl" }, + # You can also specify which target_features you promise are enabled for a + # particular target. target_features are currently not validated against + # the actual valid features supported by the target architecture. + #{ triple = "wasm32-unknown-unknown", features = ["atomics"] }, +] + +# This section is considered when running `cargo deny check advisories` +# More documentation for the advisories section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/advisories/cfg.html +[advisories] +# The path where the advisory database is cloned/fetched into +db-path = "~/.cargo/advisory-db" +# The url of the advisory database to use +db-url = "https://github.com/rustsec/advisory-db" +# The lint level for security vulnerabilities +vulnerability = "deny" +# The lint level for unmaintained crates +unmaintained = "warn" +# The lint level for crates that have been yanked from their source registry +yanked = "warn" +# The lint level for crates with security notices. Note that as of +# 2019-12-17 there are no security notice advisories in +# https://github.com/rustsec/advisory-db +notice = "warn" +# A list of advisory IDs to ignore. Note that ignored advisories will still +# output a note when they are encountered. +ignore = [ + #"RUSTSEC-0000-0000", +] +# Threshold for security vulnerabilities, any vulnerability with a CVSS score +# lower than the range specified will be ignored. Note that ignored advisories +# will still output a note when they are encountered. +# * None - CVSS Score 0.0 +# * Low - CVSS Score 0.1 - 3.9 +# * Medium - CVSS Score 4.0 - 6.9 +# * High - CVSS Score 7.0 - 8.9 +# * Critical - CVSS Score 9.0 - 10.0 +#severity-threshold = + +# This section is considered when running `cargo deny check licenses` +# More documentation for the licenses section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/licenses/cfg.html +[licenses] +# The lint level for crates which do not have a detectable license +unlicensed = "deny" +# List of explicitly allowed licenses +# See https://spdx.org/licenses/ for list of possible licenses +# [possible values: any SPDX 3.7 short identifier (+ optional exception)]. +allow = [ + #"MIT", + #"Apache-2.0", + #"Apache-2.0 WITH LLVM-exception", +] +# List of explicitly disallowed licenses +# See https://spdx.org/licenses/ for list of possible licenses +# [possible values: any SPDX 3.7 short identifier (+ optional exception)].
+deny = [ + #"Nokia", +] +# Lint level for licenses considered copyleft +copyleft = "allow" +# Blanket approval or denial for OSI-approved or FSF Free/Libre licenses +# * both - The license will be approved if it is both OSI-approved *AND* FSF +# * either - The license will be approved if it is either OSI-approved *OR* FSF +# * osi-only - The license will be approved if it is OSI-approved *AND NOT* FSF +# * fsf-only - The license will be approved if it is FSF *AND NOT* OSI-approved +# * neither - This predicate is ignored and the default lint level is used +allow-osi-fsf-free = "either" +# Lint level used when no other predicates are matched +# 1. License isn't in the allow or deny lists +# 2. License isn't copyleft +# 3. License isn't OSI/FSF, or allow-osi-fsf-free = "neither" +default = "deny" +# The confidence threshold for detecting a license from license text. +# The higher the value, the more closely the license text must match the +# canonical license text of a valid SPDX license file. +# [possible values: any between 0.0 and 1.0]. +confidence-threshold = 0.9 +# Allow 1 or more licenses on a per-crate basis, so that particular licenses +# aren't accepted for every possible crate as with the normal allow list +exceptions = [ + # Each entry is the crate and version constraint, and its specific allow + # list + #{ allow = ["Zlib"], name = "adler32", version = "*" }, +] + +# Some crates don't have (easily) machine readable licensing information, +# adding a clarification entry for it allows you to manually specify the +# licensing information +[[licenses.clarify]] +# The name of the crate the clarification applies to +name = "ring" +# The optional version constraint for the crate +#version = "*" +# The SPDX expression for the license requirements of the crate +expression = "OpenSSL" +# One or more files in the crate's source used as the "source of truth" for +# the license expression. If the contents match, the clarification will be used +# when running the license check, otherwise the clarification will be ignored +# and the crate will be checked normally, which may produce warnings or errors +# depending on the rest of your configuration +license-files = [ + # Each entry is a crate relative path, and the (opaque) hash of its contents + { path = "LICENSE", hash = 0xbd0eed23 } +] +[[licenses.clarify]] +name = "webpki" +expression = "ISC" +license-files = [{ path = "LICENSE", hash = 0x001c7e6c }] + +[licenses.private] +# If true, ignores workspace crates that aren't published, or are only +# published to private registries +ignore = false +# One or more private registries that you might publish crates to, if a crate +# is only published to private registries, and ignore is true, the crate will +# not have its license(s) checked +registries = [ + #"https://sekretz.com/registry" +] + +# This section is considered when running `cargo deny check bans`. +# More documentation about the 'bans' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/bans/cfg.html +[bans] +# Lint level for when multiple versions of the same crate are detected +multiple-versions = "warn" +# The graph highlighting used when creating dotgraphs for crates +# with multiple versions +# * lowest-version - The path to the lowest versioned duplicate is highlighted +# * simplest-path - The path to the version with the fewest edges is highlighted +# * all - Both lowest-version and simplest-path are used +highlight = "lowest-version" +# List of crates that are allowed. Use with care!
+allow = [ + #{ name = "ansi_term", version = "=0.11.0" }, +] +# List of crates to deny +deny = [ + { name = "parity-util-mem", version = "<0.6" } + # Each entry is the name of a crate and a version range. If version is + # not specified, all versions will be matched. +] +# Certain crates/versions that will be skipped when doing duplicate detection. +skip = [ + #{ name = "ansi_term", version = "=0.11.0" }, +] +# Similarly to `skip`, this allows you to skip certain crates during duplicate +# detection. Unlike skip, it also includes the entire tree of transitive +# dependencies starting at the specified crate, up to a certain depth, which is +# by default infinite +skip-tree = [ + #{ name = "ansi_term", version = "=0.11.0", depth = 20 }, +] + +# This section is considered when running `cargo deny check sources`. +# More documentation about the 'sources' section can be found here: +# https://embarkstudios.github.io/cargo-deny/checks/sources/cfg.html +[sources] +# Lint level for what to happen when a crate from a crate registry that is not +# in the allow list is encountered +unknown-registry = "deny" +# Lint level for what to happen when a crate from a git repository that is not +# in the allow list is encountered +unknown-git = "warn" +# List of URLs for allowed crate registries. Defaults to the crates.io index +# if not specified. If it is specified but empty, no registries are allowed. +allow-registry = ["https://github.com/rust-lang/crates.io-index"] +# List of URLs for allowed Git repositories +allow-git = [] diff --git a/.maintain/github/check_gitlab_pipeline.sh b/.maintain/github/check_gitlab_pipeline.sh new file mode 100755 index 0000000000000000000000000000000000000000..e7aaff15bf6a85b49c8d613efe64e1451a0800be --- /dev/null +++ b/.maintain/github/check_gitlab_pipeline.sh @@ -0,0 +1,37 @@ +#!/bin/bash +SUBSTRATE_API_BASEURL="https://gitlab.parity.io/api/v4/projects/145" + +TAG_NAME=$(echo "$GITHUB_REF" | sed -E 's_refs/tags/(.*)_\1_') +PIPELINE_ID=$(curl -s $SUBSTRATE_API_BASEURL/pipelines | jq -r "map(select(.ref==\"$TAG_NAME\")) | .[0] | .id") +if [ "$PIPELINE_ID" == "null" ]; then + echo "[!] Pipeline for $TAG_NAME not found. Exiting." + exit 1 +fi + +echo "[+] Pipeline path: https://gitlab.parity.io/parity/substrate/pipelines/$PIPELINE_ID" + +# 130 minute job max +for (( c=0; c < 130; c++ )); do + out=$(curl -s "$SUBSTRATE_API_BASEURL/pipelines/$PIPELINE_ID" | jq -r .status) + case $out in + "success") + echo "[+] Pipeline $PIPELINE_ID for $TAG_NAME succeeded!" + exit 0 + ;; + "failed") + echo "[!] Pipeline $PIPELINE_ID for $TAG_NAME failed. Cannot proceed. Check job output on gitlab!" + exit 1 + ;; + "cancelled") + echo "[!] Pipeline $PIPELINE_ID for $TAG_NAME was cancelled. Cannot proceed!" + exit 1 + ;; + "running") + echo "[-] Pipeline $PIPELINE_ID for $TAG_NAME still in progress..." + esac + sleep 60 +done +# If we reach here, we timed out after 130 minutes +echo "[!] Pipeline $PIPELINE_ID for $TAG_NAME timed out!
Cannot proceed" +echo "::set-output name=pipeline_status::timedout" +exit 1 diff --git a/.maintain/gitlab/check_polkadot_companion_build.sh b/.maintain/gitlab/check_polkadot_companion_build.sh new file mode 100755 index 0000000000000000000000000000000000000000..eb4c6440f46c9fcafbd6a9ea9d78102518a4e619 --- /dev/null +++ b/.maintain/gitlab/check_polkadot_companion_build.sh @@ -0,0 +1,110 @@ +#!/bin/sh +# +# check if a pr is compatible with its polkadot companion pr, or with polkadot +# master if none is available +# +# to override a companion that was merely mentioned, mark the companion pr in +# the body of the substrate pr like +# +# polkadot companion: paritytech/polkadot#567 +# + + +github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls" +# use github api v3 in order to access the data without authentication +github_header="Accept: application/vnd.github.v3+json" + +boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; } +boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; } + + + +boldcat <<-EOT + + +check_polkadot_companion_build +============================== + +this job checks if there is a string in the description of the pr like + +polkadot companion: paritytech/polkadot#567 + + +it will then run cargo test from that polkadot branch with substrate code +from this pull request. in the absence of that string it will check if a +polkadot pr is mentioned and will use the last one instead. if none of the +above can be found it will check if polkadot has a branch with the exact same +name as the substrate branch. if it can't find anything, it will use master instead + + +EOT + + +SUBSTRATE_PATH=$(pwd) + +# Clone the current Polkadot master branch into ./polkadot. +git clone --depth 1 https://github.com/paritytech/polkadot.git + +cd polkadot + +# if this is a pull request, check for a companion; otherwise use +# polkadot:master +if expr match "${CI_COMMIT_REF_NAME}" '^[0-9]\+$' >/dev/null +then + boldprint "this is pull request no ${CI_COMMIT_REF_NAME}" + + pr_data_file="$(mktemp)" + # get the last reference to a pr in polkadot + curl -sSL -H "${github_header}" -o "${pr_data_file}" \ + "${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME}" + + pr_body="$(sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p' "${pr_data_file}")" + + pr_companion="$(echo "${pr_body}" | sed -n -r \ + -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ + -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ + | tail -n 1)" + + if [ -z "${pr_companion}" ] + then + pr_companion="$(echo "${pr_body}" | sed -n -r \ + -e 's;^.*paritytech/polkadot/#([0-9]+).*$;\1;p' \ + -e 's;^.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ + | tail -n 1)" + fi + + if [ "${pr_companion}" ] + then + boldprint "companion pr specified/detected: #${pr_companion}" + git fetch --depth 1 origin refs/pull/${pr_companion}/head:pr/${pr_companion} + git checkout pr/${pr_companion} + else + pr_ref="$(grep -Po '"ref"\s*:\s*"\K(?!master)[^"]*' "${pr_data_file}")" + if git fetch --depth 1 origin "$pr_ref":branch/"$pr_ref" 2>/dev/null + then + boldprint "companion branch detected: $pr_ref" + git checkout branch/"$pr_ref" + else + boldprint "no companion branch found - building polkadot:master" + fi + fi + rm -f "${pr_data_file}" +else + boldprint "this is not a pull request - building polkadot:master" +fi + +# Make sure we override the crates in native and wasm build +# patching the git path as described in the link below did not
test correctly +# https://doc.rust-lang.org/cargo/reference/overriding-dependencies.html +mkdir .cargo +echo "paths = [ \"$SUBSTRATE_PATH\" ]" > .cargo/config + +mkdir -p target/debug/wbuild/.cargo +cp .cargo/config target/debug/wbuild/.cargo/config + +# package, others are updated along the way. +cargo update + +# Test Polkadot pr or master branch with this Substrate commit. +time cargo test --all --release --verbose + diff --git a/.maintain/gitlab/check_polkadot_companion_status.sh b/.maintain/gitlab/check_polkadot_companion_status.sh new file mode 100755 index 0000000000000000000000000000000000000000..5387e68f25cbba284433b8946f20f6eacfc76b9f --- /dev/null +++ b/.maintain/gitlab/check_polkadot_companion_status.sh @@ -0,0 +1,100 @@ +#!/bin/sh +# +# check for a polkadot companion pr and ensure it has approvals and is +# mergeable +# + +github_api_substrate_pull_url="https://api.github.com/repos/paritytech/substrate/pulls" +github_api_polkadot_pull_url="https://api.github.com/repos/paritytech/polkadot/pulls" +# use github api v3 in order to access the data without authentication +github_header="Accept: application/vnd.github.v3+json" + +boldprint () { printf "|\n| \033[1m${@}\033[0m\n|\n" ; } +boldcat () { printf "|\n"; while read l; do printf "| \033[1m${l}\033[0m\n"; done; printf "|\n" ; } + + + +boldcat <<-EOT + + +check_polkadot_companion_status +=============================== + +this job checks if there is a string in the description of the pr like + +polkadot companion: paritytech/polkadot#567 + +or if any other polkadot pr is mentioned in this pr's description, and checks +its status. + + +EOT + + +if ! [ "${CI_COMMIT_REF_NAME}" -gt 0 2>/dev/null ] +then + boldprint "this doesn't seem to be a pull request" + exit 1 +fi + +boldprint "this is pull request no ${CI_COMMIT_REF_NAME}" + +pr_body="$(curl -H "${github_header}" -s ${github_api_substrate_pull_url}/${CI_COMMIT_REF_NAME} \ + | sed -n -r 's/^[[:space:]]+"body": (".*")[^"]+$/\1/p')" + +# get companion if explicitly specified +pr_companion="$(echo "${pr_body}" | sed -n -r \ + -e 's;^.*polkadot companion: paritytech/polkadot#([0-9]+).*$;\1;p' \ + -e 's;^.*polkadot companion: https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ + | tail -n 1)" + +# get companion mentioned in the description +if [ -z "${pr_companion}" ] +then + pr_companion="$(echo "${pr_body}" | sed -n -r \ + 's;^.*https://github.com/paritytech/polkadot/pull/([0-9]+).*$;\1;p' \ + | tail -n 1)" +fi + +if [ -z "${pr_companion}" ] +then + boldprint "no companion pr found" + exit 0 +fi + +boldprint "companion pr: #${pr_companion}" + +# check the status of that pull request - needs to be +# mergeable and approved + +curl -H "${github_header}" -sS -o companion_pr.json \ + ${github_api_polkadot_pull_url}/${pr_companion} + +if jq -e .merged < companion_pr.json >/dev/null +then + boldprint "polkadot pr #${pr_companion} already merged" + exit 0 +fi + +if jq -e '.mergeable' < companion_pr.json >/dev/null +then + boldprint "polkadot pr #${pr_companion} mergeable" +else + boldprint "polkadot pr #${pr_companion} not mergeable" + exit 1 +fi + +curl -H "${github_header}" -sS -o companion_pr_reviews.json \ + ${github_api_polkadot_pull_url}/${pr_companion}/reviews + +if [ -n "$(jq -r -e '.[].state | select(. == "CHANGES_REQUESTED")' < companion_pr_reviews.json)" ] && \ + [ -z "$(jq -r -e '.[].state | select(.
== "APPROVED")' < companion_pr_reviews.json)" ] +then + boldprint "polkadot pr #${pr_companion} not APPROVED" + exit 1 +fi + +boldprint "polkadot pr #${pr_companion} state APPROVED" +exit 0 + + diff --git a/.maintain/gitlab/check_runtime.sh b/.maintain/gitlab/check_runtime.sh index 2355461cb78485ea3b4d63049560f3e33c474236..5b7e25e3afc4eabef01d984047a98d675ab09757 100755 --- a/.maintain/gitlab/check_runtime.sh +++ b/.maintain/gitlab/check_runtime.sh @@ -33,6 +33,8 @@ git log --graph --oneline --decorate=short -n 10 boldprint "make sure the master branch and release tag are available in shallow clones" git fetch --depth=${GIT_DEPTH:-100} origin master git fetch --depth=${GIT_DEPTH:-100} origin release +git tag -f release FETCH_HEAD +git log -n1 release boldprint "check if the wasm sources changed" diff --git a/.maintain/gitlab/generate_changelog.sh b/.maintain/gitlab/generate_changelog.sh new file mode 100755 index 0000000000000000000000000000000000000000..ba2a507e4cac694f918aa2c88cf1585991faa373 --- /dev/null +++ b/.maintain/gitlab/generate_changelog.sh @@ -0,0 +1,73 @@ +#!/usr/bin/env bash + +# shellcheck source=lib.sh +source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" + +version="$2" +last_version="$1" + +all_changes="$(sanitised_git_logs "$last_version" "$version")" +runtime_changes="" +api_changes="" +client_changes="" +changes="" + +while IFS= read -r line; do + pr_id=$(echo "$line" | sed -E 's/.*#([0-9]+)\)$/\1/') + + # Skip if the PR has the silent label - this allows us to skip a few requests + if has_label 'paritytech/substrate' "$pr_id" 'B0-silent'; then + continue + fi + if has_label 'paritytech/substrate' "$pr_id" 'B1-runtimenoteworthy'; then + runtime_changes="$runtime_changes +$line" + fi + if has_label 'paritytech/substrate' "$pr_id" 'B1-clientnoteworthy'; then + client_changes="$client_changes +$line" + fi + if has_label 'paritytech/substrate' "$pr_id" 'B1-apinoteworthy' ; then + api_changes="$api_changes +$line" + continue + fi +done <<< "$all_changes" + +# Make the substrate section if there are any substrate changes +if [ -n "$runtime_changes" ] || + [ -n "$api_changes" ] || + [ -n "$client_changes" ]; then + changes=$(cat << EOF +Substrate changes +----------------- + +EOF +) + if [ -n "$runtime_changes" ]; then + changes="$changes + +Runtime +------- +$runtime_changes" + fi + if [ -n "$client_changes" ]; then + changes="$changes + +Client +------ +$client_changes" + fi + if [ -n "$api_changes" ]; then + changes="$changes + +API +--- +$api_changes" + fi + release_text="$release_text + +$changes" +fi + +echo "$changes" diff --git a/.maintain/gitlab/lib.sh b/.maintain/gitlab/lib.sh index c8b2d73e6097f42fd6590b5e3d78c537dc620028..ecc9a5f54288cd4636a05cb7f2a2d0535e40ebe9 100755 --- a/.maintain/gitlab/lib.sh +++ b/.maintain/gitlab/lib.sh @@ -15,11 +15,23 @@ sanitised_git_logs(){ } # Returns the last published release on github +# Note: we can't just use /latest because that ignores prereleases # repo: 'organization/repo' # Usage: last_github_release "$repo" last_github_release(){ - curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" \ - -s "$api_base/$1/releases/latest" | jq '.tag_name' + i=0 + # Iterate over releases until we find the last release that's not just a draft + while [ $i -lt 29 ]; do + out=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$api_base/$1/releases" | jq ".[$i]") + echo "$out" + # Ugh when echoing to jq, we need to translate newlines into spaces :/ + if [ "$(echo "$out" | tr '\r\n' ' ' | jq '.draft')" = 
"false" ]; then + echo "$out" | tr '\r\n' ' ' | jq '.tag_name' + return + else + i=$((i + 1)) + fi + done } # Checks whether a tag on github has been verified diff --git a/.maintain/gitlab/publish_draft_release.sh b/.maintain/gitlab/publish_draft_release.sh index 4f73575f5bbaf836d7e07cb43c7fd1c1173858da..c5813718a69f27636b2729d8bc2cc7fbf05561fa 100755 --- a/.maintain/gitlab/publish_draft_release.sh +++ b/.maintain/gitlab/publish_draft_release.sh @@ -3,44 +3,12 @@ # shellcheck source=lib.sh source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh" -# Substrate labels for PRs we want to include in the release notes -labels=( - 'B1-runtimenoteworthy' - 'B1-clientnoteworthy' - 'B1-apinoteworthy' -) - version="$CI_COMMIT_TAG" # Note that this is not the last *tagged* version, but the last *published* version last_version=$(last_github_release 'paritytech/substrate') -echo "[+] Version: $version; Previous version: $last_version" - -all_changes="$(sanitised_git_logs "$last_version" "$version")" -labelled_changes="" -echo "[+] Iterating through $(wc -l <<< "$all_changes") changes to find labelled PRs" -while IFS= read -r line; do - pr_id=$(echo "$line" | sed -E 's/.*#([0-9]+)\)$/\1/') - - # Skip if the PR has the silent label - this allows us to skip a few requests - if has_label 'paritytech/substrate' "$pr_id" 'B0-silent'; then - continue - fi - for label in "${labels[@]}"; do - if has_label 'paritytech/substrate' "$pr_id" "$label"; then - labelled_changes="$labelled_changes -$line" - fi - done -done <<< "$all_changes" - - -release_text="Substrate $version ------------------ -$labelled_changes" -echo "[+] Release text generated: " -echo "$release_text" +release_text="$(./generate_release_text.sh "$last_version" "$version")" echo "[+] Pushing release to github" # Create release on github diff --git a/.maintain/node-template-release/Cargo.toml b/.maintain/node-template-release/Cargo.toml index 606def19bb99f1002058d1d868181cbc9c0fbb56..dd3166d58ddf463580474a02ea70efdcdbf00228 100644 --- a/.maintain/node-template-release/Cargo.toml +++ b/.maintain/node-template-release/Cargo.toml @@ -16,3 +16,6 @@ git2 = "0.8" flate2 = "1.0" [workspace] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/Cargo.lock b/Cargo.lock index ac67db2e52069420c5ae075de63e6b592bd833a4..31badd989fe1ac3418e356304f776bff0a05ff69 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -61,13 +61,24 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.9" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5e63fd144e18ba274ae7095c0197a870a7b9468abc801dd62f190d80817d2ec" +checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" dependencies = [ "memchr", ] +[[package]] +name = "alga" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4f823d037a7ec6ea2197046bafd4ae150e6bc36f9ca347404f46a46823fa84f2" +dependencies = [ + "approx", + "num-complex", + "num-traits 0.2.11", +] + [[package]] name = "ansi_term" version = "0.11.0" @@ -88,9 +99,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.26" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7825f6833612eb2414095684fcf6c635becf3ce97fe48cf6421321e93bfbd53c" +checksum = "d9a60d744a80c30fcb657dfe2c1b22bcb3e814c1a1e3674f32bf5820b570fbff" [[package]] name = "app_dirs" @@ -104,11 +115,26 @@ dependencies = [ "xdg", ] +[[package]] +name = "approx" +version = 
"0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0e60b75072ecd4168020818c0107f2857bb6c4e64252d8d3983f6263b40a5c3" +dependencies = [ + "num-traits 0.2.11", +] + +[[package]] +name = "arbitrary" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75153c95fdedd7db9732dfbfc3702324a1627eec91ba56e37cd0ac78314ab2ed" + [[package]] name = "arc-swap" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7b8a9123b8027467bce0099fe556c628a53c8d83df0507084c31e9ba2e39aff" +checksum = "d663a8e9a99154b5fb793032533f6328da35e23aac63d5c152279aa8ba356825" [[package]] name = "arrayref" @@ -146,21 +172,21 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" dependencies = [ - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "assert_cmd" -version = "0.12.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6283bac8dd7226470d491bc4737816fea4ca1fba7a2847f2e9097fd6bfb4624c" +checksum = "c88b9ca26f9c16ec830350d309397e74ee9abdfd8eb1f71cb6ecc71a3fc818da" dependencies = [ "doc-comment", - "escargot", "predicates", "predicates-core", "predicates-tree", + "wait-timeout", ] [[package]] @@ -184,7 +210,7 @@ dependencies = [ "futures-io", "futures-timer 2.0.2", "kv-log-macro", - "log 0.4.8", + "log", "memchr", "mio", "mio-uds", @@ -242,9 +268,9 @@ checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] name = "backtrace" -version = "0.3.44" +version = "0.3.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4036b9bf40f3cf16aba72a3d65e8a520fc4bafcdc7079aea8f848c58c5b5536" +checksum = "b1e692897359247cc6bb902933361652380af0f1b7651ae5c5013407f30e109e" dependencies = [ "backtrace-sys", "cfg-if", @@ -254,9 +280,9 @@ dependencies = [ [[package]] name = "backtrace-sys" -version = "0.1.32" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6575f128516de27e3ce99689419835fce9643a9b215a14d2b5b685be018491" +checksum = "7de8aba10a69c8e8d7622c5710229485ec32e9d55fdad160ea559c086fdcd118" dependencies = [ "cc", "libc", @@ -268,16 +294,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" -[[package]] -name = "base64" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" -dependencies = [ - "byteorder 1.3.4", - "safemem", -] - [[package]] name = "base64" version = "0.10.1" @@ -305,9 +321,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.53.1" +version = "0.53.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99de13bb6361e01e493b3db7928085dcc474b7ba4f5481818e53a89d76b8393f" +checksum = "6bb26d6a69a335b8cb0e7c7e9775cd5666611dc50a37177c3f2cedcfc040e8c8" dependencies = [ "bitflags", "cexpr", @@ -317,10 +333,10 @@ dependencies = [ "env_logger 0.7.1", "lazy_static", "lazycell", - "log 0.4.8", + "log", "peeking_take_while", "proc-macro2", - "quote", + "quote 1.0.3", "regex", "rustc-hash", "shlex", @@ -341,9 +357,13 @@ checksum = "5da9b3d9f6f585199287a473f4f8dfab6566cf827d15c00c219f53c645687ead" [[package]] name = "bitvec" -version = "0.15.2" +version = 
"0.17.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a993f74b4c99c1908d156b8d2e0fb6277736b0ecbd833982fd1241d39b2766a6" +checksum = "41262f11d771fd4a61aa3ce019fca363b4b6c282fca9da2a31186d3965a47a5c" +dependencies = [ + "either", + "radium", +] [[package]] name = "blake2" @@ -378,6 +398,17 @@ dependencies = [ "constant_time_eq", ] +[[package]] +name = "blake2s_simd" +version = "0.5.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab9e07352b829279624ceb7c64adb4f585dacdb81d35cafae81139ccd617cf44" +dependencies = [ + "arrayref", + "arrayvec 0.5.1", + "constant_time_eq", +] + [[package]] name = "block-buffer" version = "0.7.3" @@ -430,9 +461,9 @@ checksum = "b170cd256a3f9fa6b9edae3e44a7dfdfc77e8124dbc3e2612d75f9c3e2396dae" [[package]] name = "bstr" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "502ae1441a0a5adb8fbd38a5955a6416b9493e92b465de5e4a9bde6a539c2c48" +checksum = "2889e6d50f394968c8bf4240dc3f2a7eb4680844d27308f798229ac9d4725f41" dependencies = [ "lazy_static", "memchr", @@ -451,9 +482,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f359dc14ff8911330a51ef78022d376f25ed00248912803b58f00cb1c27f742" +checksum = "12ae9db68ad7fac5fe51304d20f016c911539251075a214f8e663babefa35187" [[package]] name = "byte-slice-cast" @@ -496,15 +527,6 @@ version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "130aac562c0dd69c56b3b1cc8ffd2e17be31d0b6c25b61c96b76231aa23e39e1" -[[package]] -name = "c2-chacha" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" -dependencies = [ - "ppv-lite86", -] - [[package]] name = "c_linked_list" version = "1.1.1" @@ -543,9 +565,9 @@ dependencies = [ [[package]] name = "cexpr" -version = "0.3.6" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fce5b5fb86b0c57c20c834c1b412fd09c77c8a59b9473f86272709e78874cd1d" +checksum = "f4aedb84272dbe89af497cf81375129abda4fc0a9e7c5d317498c15cc30c0d27" dependencies = [ "nom", ] @@ -567,11 +589,12 @@ dependencies = [ [[package]] name = "chain-spec-builder" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "ansi_term 0.12.1", "node-cli", "rand 0.7.3", + "sc-chain-spec", "sc-keystore", "sp-core", "structopt", @@ -579,22 +602,22 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.10" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31850b4a4d6bae316f7a09e691c944c28299298837edc0a03f755618c23cbc01" +checksum = "80094f509cf8b5ae86a4966a39b3ff66cd7e2a3e594accec3743ff3fabeab5b2" dependencies = [ "js-sys", "num-integer", - "num-traits", + "num-traits 0.2.11", "time", "wasm-bindgen", ] [[package]] name = "clang-sys" -version = "0.28.1" +version = "0.29.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81de550971c976f176130da4b2978d3b524eaa0fd9ac31f3ceb5ae1231fb4853" +checksum = "fe6837df1d5cba2397b835c8530f51723267e16abbf83892e9e5af4f0e5dd10a" dependencies = [ "glob 0.3.0", "libc", @@ -659,7 +682,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7871d2947441b0fdd8e2bd1ce2a2f75304f896582c0d572162d48290683c48" dependencies = [ - "log 0.4.8", + 
"log", "web-sys", ] @@ -689,32 +712,16 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "core-foundation" -version = "0.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d" -dependencies = [ - "core-foundation-sys 0.6.2", - "libc", -] - [[package]] name = "core-foundation" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57d24c7a13c43e870e37c1556b74555437870a04514f7685f5b354e090567171" dependencies = [ - "core-foundation-sys 0.7.0", + "core-foundation-sys", "libc", ] -[[package]] -name = "core-foundation-sys" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" - [[package]] name = "core-foundation-sys" version = "0.7.0" @@ -724,7 +731,8 @@ checksum = "b3a71ab494c0b5b860bdc8407ae08978052417070c2ced38573a9157ad75b8ac" [[package]] name = "cranelift-bforest" version = "0.59.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45a9c21f8042b9857bda93f6c1910b9f9f24100187a3d3d52f214a34e3dc5818" dependencies = [ "cranelift-entity", ] @@ -732,7 +740,8 @@ dependencies = [ [[package]] name = "cranelift-codegen" version = "0.59.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7853f77a6e4a33c67a69c40f5e1bb982bd2dc5c4a22e17e67b65bbccf9b33b2e" dependencies = [ "byteorder 1.3.4", "cranelift-bforest", @@ -740,7 +749,7 @@ dependencies = [ "cranelift-codegen-shared", "cranelift-entity", "gimli", - "log 0.4.8", + "log", "serde", "smallvec 1.2.0", "target-lexicon", @@ -750,7 +759,8 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" version = "0.59.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "084cd6d5fb0d1da28acd72c199471bfb09acc703ec8f3bf07b1699584272a3b9" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -759,12 +769,14 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" version = "0.59.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "701b599783305a58c25027a4d73f2d6b599b2d8ef3f26677275f480b4d51e05d" [[package]] name = "cranelift-entity" version = "0.59.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b88e792b28e1ebbc0187b72ba5ba880dad083abe9231a99d19604d10c9e73f38" dependencies = [ "serde", ] @@ -772,10 +784,11 @@ dependencies = [ [[package]] name = "cranelift-frontend" version = "0.59.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"518344698fa6c976d853319218415fdfb4f1bc6b42d0b2e2df652e55dff1f778" dependencies = [ "cranelift-codegen", - "log 0.4.8", + "log", "smallvec 1.2.0", "target-lexicon", ] @@ -783,7 +796,8 @@ dependencies = [ [[package]] name = "cranelift-native" version = "0.59.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32daf082da21c0c05d93394ff4842c2ab7c4991b1f3186a1d952f8ac660edd0b" dependencies = [ "cranelift-codegen", "raw-cpuid", @@ -793,12 +807,13 @@ dependencies = [ [[package]] name = "cranelift-wasm" version = "0.59.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2aa816f554a3ef739a5d17ca3081a1f8983f04c944ea8ff60fb8d9dd8cd2d7b" dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "log 0.4.8", + "log", "serde", "thiserror", "wasmparser", @@ -827,7 +842,7 @@ dependencies = [ "itertools", "lazy_static", "libc", - "num-traits", + "num-traits 0.2.11", "rand_core 0.3.1", "rand_os", "rand_xoshiro", @@ -853,7 +868,7 @@ dependencies = [ "csv", "itertools", "lazy_static", - "num-traits", + "num-traits 0.2.11", "oorandom", "plotters", "rayon", @@ -996,8 +1011,8 @@ version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47c5e5ac752e18207b12e16b10631ae5f7f68f8805f335f9b817ead83d9ffce1" dependencies = [ - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -1020,19 +1035,6 @@ dependencies = [ "rand 0.3.23", ] -[[package]] -name = "curve25519-dalek" -version = "1.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b7dcd30ba50cdf88b55b033456138b7c0ac4afdc436d82e1b79f370f24cc66d" -dependencies = [ - "byteorder 1.3.4", - "clear_on_drop", - "digest", - "rand_core 0.3.1", - "subtle 2.2.2", -] - [[package]] name = "curve25519-dalek" version = "2.0.0" @@ -1043,7 +1045,7 @@ dependencies = [ "digest", "rand_core 0.5.1", "subtle 2.2.2", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -1054,13 +1056,13 @@ checksum = "11c0346158a19b3627234e15596f5e465c360fcdb97d817bcb255e0510f5a788" [[package]] name = "derive_more" -version = "0.99.3" +version = "0.99.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a806e96c59a76a5ba6e18735b6cf833344671e61e7863f2edb5c518ea2cac95c" +checksum = "e2323f3f47db9a0e77ce7a300605d8d2098597fc451ed1a97bb1f6411bb550a7" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -1112,9 +1114,9 @@ dependencies = [ [[package]] name = "doc-comment" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "923dea538cea0aa3025e8685b20d6ee21ef99c4f77e954a30febbaac5ec73a97" +checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" [[package]] name = "ed25519-dalek" @@ -1123,7 +1125,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" dependencies = [ "clear_on_drop", - "curve25519-dalek 2.0.0", + "curve25519-dalek", "rand 0.7.3", "sha2", ] @@ -1134,6 +1136,17 @@ version = "1.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb1f6b1ce1c140482ea30ddd3335fc0024ac7ee112895426e0a629a6c20adfe3" +[[package]] +name = 
"enum-primitive-derive" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2b90e520ec62c1864c8c78d637acbfe8baf5f63240f2fb8165b8325c07812dd" +dependencies = [ + "num-traits 0.1.43", + "quote 0.3.15", + "syn 0.11.11", +] + [[package]] name = "enumflags2" version = "0.6.2" @@ -1150,8 +1163,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecf634c5213044b8d54a46dd282cf5dd1f86bb5cb53e92c409cb4680a7fb9894" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -1162,7 +1175,7 @@ checksum = "aafcde04e90a5226a6443b7aabdb016ba2f8307c847d524724bd9b346dd1a2d3" dependencies = [ "atty", "humantime", - "log 0.4.8", + "log", "regex", "termcolor", ] @@ -1175,7 +1188,7 @@ checksum = "44533bbbb3bb3c1fa17d9f2e4e38bbbaf8396ba82193c4cb1b6445d711445d36" dependencies = [ "atty", "humantime", - "log 0.4.8", + "log", "regex", "termcolor", ] @@ -1188,18 +1201,18 @@ checksum = "516aa8d7a71cb00a1c4146f0798549b93d083d4f189b3ced8f3de6b8f11ee6c4" [[package]] name = "erased-serde" -version = "0.3.10" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7d80305c9bd8cd78e3c753eb9fb110f83621e5211f1a3afffcc812b104daf9" +checksum = "d88b6d1705e16a4d62e05ea61cc0496c2bd190f4fa8e5c1f11ce747be6bcf3d1" dependencies = [ "serde", ] [[package]] name = "errno" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c2a071601ed01b988f896ab14b95e67335d1eeb50190932a1320f7fe3cadc84e" +checksum = "b480f641ccf0faf324e20c1d3e53d81b7484c698b42ea677f6907ae4db195371" dependencies = [ "errno-dragonfly", "libc", @@ -1216,50 +1229,38 @@ dependencies = [ "libc", ] -[[package]] -name = "escargot" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74cf96bec282dcdb07099f7e31d9fed323bca9435a09aba7b6d99b7617bca96d" -dependencies = [ - "lazy_static", - "log 0.4.8", - "serde", - "serde_json", -] - [[package]] name = "ethbloom" -version = "0.8.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32cfe1c169414b709cf28aa30c74060bdb830a03a8ba473314d079ac79d80a5f" +checksum = "9e7abcddbdd5db30aeed4deb586adc4824e6c247e2f7238d1187f752893f096b" dependencies = [ "crunchy", "fixed-hash", "impl-rlp", - "impl-serde 0.2.3", - "tiny-keccak 1.5.0", + "impl-serde 0.3.0", + "tiny-keccak 2.0.2", ] [[package]] name = "ethereum-types" -version = "0.8.0" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba744248e3553a393143d5ebb68939fc3a4ec0c22a269682535f5ffe7fed728c" +checksum = "964c23cdee0ca07d5be2a628b46d5c11a2134ce554a8c16d8dbc2db647e4fd4d" dependencies = [ "ethbloom", "fixed-hash", "impl-rlp", - "impl-serde 0.2.3", + "impl-serde 0.3.0", "primitive-types", "uint", ] [[package]] name = "evm" -version = "0.15.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "272f65e18a2b6449b682bfcdf6c3ccc63db0b93898d89c0fb237548bbfc764a5" +checksum = "23a5c0ebf219b2b878bde1838282e0bb69828338df37fd136f1e93182ae35a59" dependencies = [ "evm-core", "evm-gasometer", @@ -1272,18 +1273,18 @@ dependencies = [ [[package]] name = "evm-core" -version = "0.15.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66534d42e13d50f9101bed87cb568fd5aa929c600c3c13f8dadbbf39f5635945" +checksum = 
"d944a07232006a3435df8aa014fd364ed04cb28d731782339e9c56436594f2d4" dependencies = [ "primitive-types", ] [[package]] name = "evm-gasometer" -version = "0.15.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39bc5b592803ca644781fe2290b7305ea5182f7c9516d615ddfb2308c2cab639" +checksum = "6a0d986953234d3786d0ca1beaaabab6a581d2128f8ec36c8c57e9c45e3d2b32" dependencies = [ "evm-core", "evm-runtime", @@ -1292,9 +1293,9 @@ dependencies = [ [[package]] name = "evm-runtime" -version = "0.15.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "389e4b447fb26971a9c76c8aa49c0ab435f8e46e8fc46e1bc4ebf01f3c2b428f" +checksum = "1833c22f9518007d3cc28e14ff586263543516a1c7a147b260c603e4deb95403" dependencies = [ "evm-core", "primitive-types", @@ -1319,7 +1320,7 @@ dependencies = [ "anyhow", "goblin", "indexmap", - "log 0.4.8", + "log", "scroll", "string-interner", "target-lexicon", @@ -1343,8 +1344,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", "synstructure", ] @@ -1362,9 +1363,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fdlimit" -version = "0.1.2" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9084c55bb76efb1496328976db88320fe7d9ee86e649e83c4ecce3ba7a9a35d1" +checksum = "0da54a593b34c71b889ee45f5b5bb900c74148c5f7f8c6a9479ee7899f69603c" dependencies = [ "libc", ] @@ -1376,19 +1377,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8505b75b31ef7285168dd237c4a7db3c1f3e0927e7d314e670bc98e854272fe9" dependencies = [ "env_logger 0.6.2", - "log 0.4.8", + "log", ] [[package]] name = "finality-grandpa" -version = "0.11.1" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbb25bef9fcad97fb31e817da280b1c9174435b8769c770ee190a330dd181ea" +checksum = "024517816630be5204eba201e8d1d405042b1255a5e0e3f298b054fc24d59e1d" dependencies = [ "futures 0.3.4", "futures-timer 2.0.2", - "log 0.4.8", - "num-traits", + "log", + "num-traits 0.2.11", "parity-scale-codec", "parking_lot 0.9.0", "rand 0.6.5", @@ -1396,12 +1397,11 @@ dependencies = [ [[package]] name = "fixed-hash" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3367952ceb191f4ab95dd5685dc163ac539e36202f9fcfd0cb22f9f9c542fefc" +checksum = "32529fc42e86ec06e5047092082aab9ad459b070c5d2a76b14f4f5ce70bf2e84" dependencies = [ "byteorder 1.3.4", - "libc", "rand 0.7.3", "rustc-hex", "static_assertions", @@ -1415,9 +1415,9 @@ checksum = "37ab347416e802de484e4d03c7316c48f1ecb56574dfd4a46a80f173ce1de04d" [[package]] name = "flate2" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6bd6d6f4752952feb71363cffc9ebac9411b75b87c6ab6058c40c8900cf43c0f" +checksum = "2cfff41391129e0a856d6d822600b8d71179d46879e310417eb9c762eb178b42" dependencies = [ "cfg-if", "crc32fast", @@ -1432,35 +1432,22 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2fad85553e09a6f881f739c29f0b00b0f01357c743266d478b68951ce23285f3" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "fork-tree" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", ] [[package]] name = "frame-benchmarking" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", + "linregress", "parity-scale-codec", + "paste", "sp-api", "sp-io", "sp-runtime", @@ -1470,7 +1457,7 @@ dependencies = [ [[package]] name = "frame-benchmarking-cli" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-benchmarking", "parity-scale-codec", @@ -1479,6 +1466,8 @@ dependencies = [ "sc-client-db", "sc-executor", "sc-service", + "sp-core", + "sp-externalities", "sp-runtime", "sp-state-machine", "structopt", @@ -1486,7 +1475,7 @@ dependencies = [ [[package]] name = "frame-executive" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -1505,7 +1494,7 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "11.0.0-alpha.3" +version = "11.0.0-alpha.5" dependencies = [ "parity-scale-codec", "serde", @@ -1515,14 +1504,14 @@ dependencies = [ [[package]] name = "frame-support" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "bitmask", "frame-metadata", "frame-support-procedural", "frame-system", "impl-trait-for-tuples", - "log 0.4.8", + "log", "once_cell", "parity-scale-codec", "paste", @@ -1540,32 +1529,32 @@ dependencies = [ [[package]] name = "frame-support-procedural" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support-procedural-tools", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "frame-support-procedural-tools" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "frame-support-procedural-tools-derive" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -1586,7 +1575,7 @@ dependencies = [ [[package]] name = "frame-system" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "criterion 0.2.11", "frame-support", @@ -1604,7 +1593,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "sp-api", @@ -1731,7 +1720,7 @@ dependencies = [ "futures 0.1.29", "futures 0.3.4", "lazy_static", - "log 0.4.8", + "log", "parking_lot 0.9.0", "pin-project", "serde", @@ -1764,8 +1753,8 @@ checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" dependencies = [ "proc-macro-hack", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -1814,7 +1803,6 @@ dependencies = [ "proc-macro-hack", "proc-macro-nested", "slab", - "tokio-io", ] [[package]] @@ -1918,22 +1906,22 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "globset" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"925aa2cac82d8834e2b2a4415b6f6879757fb5c0928fc445ae76461a12eed8f2" +checksum = "7ad1da430bd7281dde2576f44c84cc3f0f7b475e7202cd503042dff01a8c8120" dependencies = [ "aho-corasick", "bstr", "fnv", - "log 0.4.8", + "log", "regex", ] [[package]] name = "gloo-timers" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2d17dbd803c2fc86cb1b613adf63192046a7176f383a8302594654752c4c4a" +checksum = "47204a46aaff920a1ea58b11d03dec6f704287d27561724a4631e450654a891f" dependencies = [ "futures-channel", "futures-core", @@ -1948,7 +1936,7 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3081214398d39e4bd7f2c1975f0488ed04614ffdd976c6fc7a0708278552c0da" dependencies = [ - "log 0.4.8", + "log", "plain", "scroll", ] @@ -1965,7 +1953,7 @@ dependencies = [ "futures 0.1.29", "http 0.1.21", "indexmap", - "log 0.4.8", + "log", "slab", "string", "tokio-io", @@ -1973,20 +1961,20 @@ dependencies = [ [[package]] name = "h2" -version = "0.2.1" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9433d71e471c1736fd5a61b671fc0b148d7a2992f666c958d03cd8feb3b88d1" +checksum = "377038bf3c89d18d6ca1431e7a5027194fbd724ca10592b9487ede5e8e144f42" dependencies = [ "bytes 0.5.4", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.0", + "http 0.2.1", "indexmap", - "log 0.4.8", + "log", "slab", - "tokio 0.2.12", + "tokio 0.2.16", "tokio-util", ] @@ -2026,9 +2014,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.8" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" +checksum = "725cf19794cf90aa94e65050cb4191ff5d8fa87a498383774c47b332e3af952e" dependencies = [ "libc", ] @@ -2079,6 +2067,17 @@ dependencies = [ "hmac", ] +[[package]] +name = "honggfuzz" +version = "0.5.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3de2c3273ef7735df1c5a72128ca85b1d20105b9aac643cdfd7a6e581311150" +dependencies = [ + "arbitrary", + "lazy_static", + "memmap", +] + [[package]] name = "http" version = "0.1.21" @@ -2092,9 +2091,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b708cc7f06493459026f53b9a61a7a121a5d1ec6238dee58ea4941132b30156b" +checksum = "28d569972648b2c512421b5f2a405ad6ac9666547189d0c5477a3f200f3e02f9" dependencies = [ "bytes 0.5.4", "fnv", @@ -2120,7 +2119,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13d5ff830006f7646652e057693569bfe0d51760c0085a071769d142a205111b" dependencies = [ "bytes 0.5.4", - "http 0.2.0", + "http 0.2.1", ] [[package]] @@ -2138,25 +2137,6 @@ dependencies = [ "quick-error", ] -[[package]] -name = "hyper" -version = "0.10.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a0652d9a2609a968c14be1a9ea00bf4b1d64e2e1f53a1b51b6fff3a6e829273" -dependencies = [ - "base64 0.9.3", - "httparse", - "language-tags", - "log 0.3.9", - "mime", - "num_cpus", - "time", - "traitobject", - "typeable", - "unicase 1.4.2", - "url 1.7.2", -] - [[package]] name = "hyper" version = "0.12.35" @@ -2172,7 +2152,7 @@ dependencies = [ "httparse", "iovec", "itoa", - "log 0.4.8", + "log", "net2", "rustc_version", "time", @@ -2189,24 +2169,24 @@ dependencies = [ [[package]] name = "hyper" -version = "0.13.2" +version = "0.13.4" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa1c527bbc634be72aa7ba31e4e4def9bbb020f5416916279b7c705cd838893e" +checksum = "ed6081100e960d9d74734659ffc9cc91daf1c0fc7aceb8eaa94ee1a3f5046f2e" dependencies = [ "bytes 0.5.4", "futures-channel", "futures-core", "futures-util", - "h2 0.2.1", - "http 0.2.0", + "h2 0.2.4", + "http 0.2.1", "http-body 0.3.1", "httparse", "itoa", - "log 0.4.8", + "log", "net2", "pin-project", "time", - "tokio 0.2.12", + "tokio 0.2.16", "tower-service", "want 0.3.0", ] @@ -2220,28 +2200,15 @@ dependencies = [ "bytes 0.5.4", "ct-logs", "futures-util", - "hyper 0.13.2", - "log 0.4.8", + "hyper 0.13.4", + "log", "rustls 0.17.0", "rustls-native-certs", - "tokio 0.2.12", + "tokio 0.2.16", "tokio-rustls", "webpki", ] -[[package]] -name = "hyper-tls" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a800d6aa50af4b5850b2b0f659625ce9504df908e9733b635720483be26174f" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.29", - "hyper 0.12.35", - "native-tls", - "tokio-io", -] - [[package]] name = "idna" version = "0.1.5" @@ -2307,8 +2274,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -2332,6 +2299,16 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "141340095b15ed7491bd3d4ced9d20cebfb826174b6bb03386381f62b01e3d77" +[[package]] +name = "intervalier" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64fa110ec7b8f493f416eed552740d10e7030ad5f63b2308f82c9608ec2df275" +dependencies = [ + "futures 0.3.4", + "futures-timer 2.0.2", +] + [[package]] name = "iovec" version = "0.1.4" @@ -2343,9 +2320,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a859057dc563d1388c1e816f98a1892629075fc046ed06e845b883bb8b2916fb" +checksum = "47be2f14c678be2fdcab04ab1171db51b2762ce6f0a8ee87c8dd4a04ed216135" [[package]] name = "itertools" @@ -2373,9 +2350,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.35" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7889c7c36282151f6bf465be4700359318aef36baa951462382eae49e9577cf9" +checksum = "6a27d435371a2fa5b6d2b028a74bbdb1234f308da363226a2854ca3ff8ba7055" dependencies = [ "wasm-bindgen", ] @@ -2389,15 +2366,12 @@ dependencies = [ "failure", "futures 0.1.29", "hyper 0.12.35", - "hyper-tls", "jsonrpc-core", "jsonrpc-pubsub", - "log 0.4.8", + "log", "serde", "serde_json", - "tokio 0.1.22", "url 1.7.2", - "websocket", ] [[package]] @@ -2407,7 +2381,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe3b688648f1ef5d5072229e2d672ecb92cbff7d1c79bcf3fd5898f3f3df0970" dependencies = [ "futures 0.1.29", - "log 0.4.8", + "log", "serde", "serde_derive", "serde_json", @@ -2430,8 +2404,8 @@ checksum = "8609af8f63b626e8e211f52441fcdb6ec54f1a446606b10d5c89ae9bf8a20058" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -2443,10 +2417,10 @@ dependencies = [ "hyper 0.12.35", "jsonrpc-core", "jsonrpc-server-utils", - "log 0.4.8", + "log", "net2", "parking_lot 0.10.0", - "unicase 2.6.0", + "unicase", ] [[package]] @@ -2456,7 +2430,7 @@ source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b31c9b90731276fdd24d896f31bb10aecf2e5151733364ae81123186643d939" dependencies = [ "jsonrpc-core", - "log 0.4.8", + "log", "parking_lot 0.10.0", "serde", ] @@ -2471,10 +2445,10 @@ dependencies = [ "globset", "jsonrpc-core", "lazy_static", - "log 0.4.8", + "log", "tokio 0.1.22", "tokio-codec", - "unicase 2.6.0", + "unicase", ] [[package]] @@ -2485,7 +2459,7 @@ checksum = "b94e5773b2ae66e0e02c80775ce6bbba6f15d5bb47c14ec36a36fcf94f8df851" dependencies = [ "jsonrpc-core", "jsonrpc-server-utils", - "log 0.4.8", + "log", "parking_lot 0.10.0", "slab", "ws", @@ -2524,25 +2498,24 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c54d9f465d530a752e6ebdc217e081a7a614b48cb200f6f0aee21ba6bc9aabb" dependencies = [ - "log 0.4.8", + "log", ] [[package]] name = "kvdb" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03080afe6f42cd996da9f568d6add5d7fb5ee2ea7fb7802d2d2cbd836958fd87" +checksum = "cad096c6849b2ef027fabe35c4aed356d0e3d3f586d0a8361e5e17f1e50a7ce5" dependencies = [ - "parity-bytes", "parity-util-mem", "smallvec 1.2.0", ] [[package]] name = "kvdb-memorydb" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9355274e5a9e0a7e8ef43916950eae3949024de2a8dffe4d5a6c13974a37c8e" +checksum = "4aa954d12cfac958822dfd77aab34f3eec71f103b918c4ab79ab59a36ee594ea" dependencies = [ "kvdb", "parity-util-mem", @@ -2551,14 +2524,14 @@ dependencies = [ [[package]] name = "kvdb-rocksdb" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1fecd50b14a534125228d7039951f92aaff742aff151c04546347aba4d3b4fbc" +checksum = "b3f14c3a10c8894d26175e57e9e26032e6d6c49c30cbe2468c5bf5f6b64bb0be" dependencies = [ "fs-swap", "interleaved-ordered", "kvdb", - "log 0.4.8", + "log", "num_cpus", "owning_ref", "parity-util-mem", @@ -2570,27 +2543,21 @@ dependencies = [ [[package]] name = "kvdb-web" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a985c47b4c46429e96033ebf6eaed784a81ceccb4e5df13d63f3b9078a4df81" +checksum = "26f96eec962af83cdf7c83036b3dbb0ae6a1249ddab746820618e2567ca8ebcd" dependencies = [ "futures 0.3.4", "js-sys", "kvdb", "kvdb-memorydb", - "log 0.4.8", + "log", "parity-util-mem", "send_wrapper 0.3.0", "wasm-bindgen", "web-sys", ] -[[package]] -name = "language-tags" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a91d884b6667cd606bb5a69aa0c99ba811a115fc68915e7056ec08a46e93199a" - [[package]] name = "lazy_static" version = "1.4.0" @@ -2611,9 +2578,21 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.67" +version = "0.2.68" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dea0c0405123bba743ee3f91f49b1c7cfb684eef0da0a50110f758ccf24cdff0" + +[[package]] +name = "libflate" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" +checksum = "d9135df43b1f5d0e333385cb6e7897ecd1a43d7d11b91ac003f4d2c2d2401fdd" +dependencies = [ + "adler32", + "crc32fast", + "rle-decode-fast", + "take_mut", +] [[package]] name = "libloading" @@ -2625,11 +2604,17 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = 
"libm" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7d73b3f436185384286bd8098d17ec07c9a7d2388a6599f824d8502b529702a" + [[package]] name = "libp2p" -version = "0.16.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bba17ee9cac4bb89de5812159877d9b4f0a993bf41697a5a875940cd1eb71f24" +checksum = "8a261244b8d7ff58f5d62ffa33589eb1ba7733a1dfee0902ad9fdfe62ada7009" dependencies = [ "bytes 0.5.4", "futures 0.3.4", @@ -2655,8 +2640,8 @@ dependencies = [ "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "parity-multiaddr", - "parity-multihash", + "multihash", + "parity-multiaddr 0.8.0", "parking_lot 0.10.0", "pin-project", "smallvec 1.2.0", @@ -2665,22 +2650,23 @@ dependencies = [ [[package]] name = "libp2p-core" -version = "0.16.0" +version = "0.17.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b874594c4b29de1a29f27871feba8e6cd13aa54a8a1e8f8c7cf3dfac5ca287c" +checksum = "1cfe1412f2afe1366a2661abd211bb1a27ee6a664d799071282f4fba997c6858" dependencies = [ "asn1_der", "bs58", "ed25519-dalek", + "either", "fnv", "futures 0.3.4", "futures-timer 3.0.2", "lazy_static", "libsecp256k1", - "log 0.4.8", + "log", + "multihash", "multistream-select", - "parity-multiaddr", - "parity-multihash", + "parity-multiaddr 0.8.0", "parking_lot 0.10.0", "pin-project", "prost", @@ -2693,24 +2679,24 @@ dependencies = [ "thiserror", "unsigned-varint", "void", - "zeroize 1.1.0", + "zeroize", ] [[package]] name = "libp2p-core-derive" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d472e9d522f588805c77801de10b957be84e10f019ca5f869fa1825b15ea9b" +checksum = "a0eeb25d5f152a826eac57c7d1cc3de10d72dc4051e90fe4c0cd139f07a069a3" dependencies = [ - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "libp2p-deflate" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e25004d4d9837b44b22c5f1a69be1724a5168fef6cff1716b5176a972c3aa62" +checksum = "136fcef31e3247f51946c3ebefb94d0798c4c8aae78bc59cb7431b220b5330cf" dependencies = [ "flate2", "futures 0.3.4", @@ -2719,20 +2705,20 @@ dependencies = [ [[package]] name = "libp2p-dns" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b99e552f9939b606eb4b59f7f64d9b01e3f96752f47e350fc3c5fc646ed3f649" +checksum = "647178f8683bf868f7f14d5e5718dbdc2445b9f6b24ce99da96cecd7c5d2d1a6" dependencies = [ "futures 0.3.4", "libp2p-core", - "log 0.4.8", + "log", ] [[package]] name = "libp2p-floodsub" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3234f12e44f9a50351a9807b97fe7de11eb9ae4482370392ba10da6dc90722" +checksum = "34c8dee172fd1630caf91a427d601d6a8d24c8cfcbcf7d5c09c9a1f3b4bbebc2" dependencies = [ "cuckoofilter", "fnv", @@ -2747,9 +2733,9 @@ dependencies = [ [[package]] name = "libp2p-gossipsub" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d46cb3e0841bd951cbf4feae56cdc081e6347836a644fb260c3ec554149b4006" +checksum = "0042a2156fb6264bda9def93070e411dfaddf8c57c4b2d63634190d296458c76" dependencies = [ "base64 0.11.0", "byteorder 1.3.4", @@ -2759,7 +2745,7 @@ dependencies = [ "futures_codec", "libp2p-core", "libp2p-swarm", - "log 0.4.8", + "log", "lru", "prost", "prost-build", @@ -2772,14 +2758,14 @@ 
dependencies = [ [[package]] name = "libp2p-identify" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfeb935a9bd41263e4f3a24b988e9f4a044f3ae89ac284e83c17fe2f84e0d66b" +checksum = "04efa011cda5232648b5aa50bd80be7ba0a695d682b01aa46b65e5be5ece0605" dependencies = [ "futures 0.3.4", "libp2p-core", "libp2p-swarm", - "log 0.4.8", + "log", "prost", "prost-build", "smallvec 1.2.0", @@ -2788,9 +2774,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.16.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "464dc8412978d40f0286be72ed9ab5e0e1386a4a06e7f174526739b5c3c1f041" +checksum = "97f4722d83af8fc0065cee7589a000b629961c1c11d90ba09f6685b3e123b9ae" dependencies = [ "arrayvec 0.5.1", "bytes 0.5.4", @@ -2800,8 +2786,8 @@ dependencies = [ "futures_codec", "libp2p-core", "libp2p-swarm", - "log 0.4.8", - "parity-multihash", + "log", + "multihash", "prost", "prost-build", "rand 0.7.3", @@ -2815,9 +2801,9 @@ dependencies = [ [[package]] name = "libp2p-mdns" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881fcfb360c2822db9f0e6bb6f89529621556ed9a8b038313414eda5107334de" +checksum = "b752276b3bea2fca1c291f43cefc8082d8a639bb8f9052cf5adc6accfcd7b44e" dependencies = [ "async-std", "data-encoding", @@ -2827,7 +2813,7 @@ dependencies = [ "lazy_static", "libp2p-core", "libp2p-swarm", - "log 0.4.8", + "log", "net2", "rand 0.7.3", "smallvec 1.2.0", @@ -2837,31 +2823,31 @@ dependencies = [ [[package]] name = "libp2p-mplex" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8507b37ad0eed275efcde67a023c3d85af6c80768b193845b9288e848e1af95" +checksum = "0f317db8c062beecde87a8765ca03784e6f1a55daa5b9868bf60ebf9b9a2b92f" dependencies = [ "bytes 0.5.4", "fnv", "futures 0.3.4", "futures_codec", "libp2p-core", - "log 0.4.8", + "log", "parking_lot 0.10.0", "unsigned-varint", ] [[package]] name = "libp2p-noise" -version = "0.16.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15a8a3d71f898beb6f854c8aae27aa1d198e0d1f2e49412261c2d90ef39675a" +checksum = "98d3845f54288ff134dd78c131517bad8bc03965def6e6517efef03291d9b4d7" dependencies = [ - "curve25519-dalek 2.0.0", + "curve25519-dalek", "futures 0.3.4", "lazy_static", "libp2p-core", - "log 0.4.8", + "log", "prost", "prost-build", "rand 0.7.3", @@ -2869,19 +2855,19 @@ dependencies = [ "snow", "static_assertions", "x25519-dalek", - "zeroize 1.1.0", + "zeroize", ] [[package]] name = "libp2p-ping" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d22f2f228b3a828dca1cb8aa9fa331e0bc9c36510cb2c1916956e20dc85e8c" +checksum = "aa1cb80ccbedb91d9b980aafc6bf39dc7e4616a7e37c631a4e6ef62629567a13" dependencies = [ "futures 0.3.4", "libp2p-core", "libp2p-swarm", - "log 0.4.8", + "log", "rand 0.7.3", "void", "wasm-timer", @@ -2889,15 +2875,15 @@ dependencies = [ [[package]] name = "libp2p-plaintext" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56126a204d7b3382bac163143ff4125a14570b3ba76ba979103d1ae1abed1923" +checksum = "da16d35e3990cc5dc22c8d7ea4a2aa1c18f518491bb29c0c3498fb9a2d8e486e" dependencies = [ "bytes 0.5.4", "futures 0.3.4", "futures_codec", "libp2p-core", - "log 0.4.8", + "log", "prost", "prost-build", "rw-stream-sink", @@ 
-2907,12 +2893,12 @@ dependencies = [ [[package]] name = "libp2p-pnet" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b916938a8868f75180aeeffcc6a516a922d165e8fa2a90b57bad989d1ccbb57a" +checksum = "45d11e8c6d83e294ef3d7ff3a9f5a7aa5aa0c39c2d4991f2905c23c438c84526" dependencies = [ "futures 0.3.4", - "log 0.4.8", + "log", "pin-project", "rand 0.7.3", "salsa20", @@ -2921,9 +2907,9 @@ dependencies = [ [[package]] name = "libp2p-secio" -version = "0.16.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1219e9ecb4945d7331a05f5ffe96a1f6e28051bfa1223d4c60353c251de0354e" +checksum = "74130fa95effb780850ec790b7af777b893108d9b5983ab994b61d93d2eb0336" dependencies = [ "aes-ctr", "ctr", @@ -2932,7 +2918,7 @@ dependencies = [ "js-sys", "lazy_static", "libp2p-core", - "log 0.4.8", + "log", "parity-send-wrapper", "pin-project", "prost", @@ -2951,13 +2937,14 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.16.1" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "275471e7c0e88ae004660866cd54f603bd8bd1f4caef541a27f50dd8640c4d4c" +checksum = "a4ec53df8978a5d6d9dac243fb1e3adf004f8b8d28f72e6f2160df34d5f39564" dependencies = [ "futures 0.3.4", "libp2p-core", - "log 0.4.8", + "log", + "rand 0.7.3", "smallvec 1.2.0", "void", "wasm-timer", @@ -2965,9 +2952,9 @@ dependencies = [ [[package]] name = "libp2p-tcp" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e80ad4e3535345f3d666554ce347d3100453775611c05c60786bf9a1747a10" +checksum = "e25c9d9c5448c189bba7ecdd1ca23800516281476e82810eff711ef04abaf9eb" dependencies = [ "async-std", "futures 0.3.4", @@ -2975,26 +2962,26 @@ dependencies = [ "get_if_addrs", "ipnet", "libp2p-core", - "log 0.4.8", + "log", ] [[package]] name = "libp2p-uds" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76d329564a43da9d0e055a5b938633c4a8ceab1f59cec133fbc4647917c07341" +checksum = "d8dbcbe6567ea1b3c98ba4df5fd9d1b7c2bebbf50d46ceb0c2ce735c55af3f8d" dependencies = [ "async-std", "futures 0.3.4", "libp2p-core", - "log 0.4.8", + "log", ] [[package]] name = "libp2p-wasm-ext" -version = "0.16.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "923581c055bc4b8c5f42d4ce5ef43e52fe5216f1ea4bc26476cb8a966ce6220b" +checksum = "076446cabb23b0d79d2375661d837a43cbda6719d88787f234e7661c33ef9554" dependencies = [ "futures 0.3.4", "js-sys", @@ -3006,16 +2993,16 @@ dependencies = [ [[package]] name = "libp2p-websocket" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5351ca9eea122081c1c0f9323164d2918cac29b5a6bfe5054d4ba8ec9447cf42" +checksum = "a0117ed6a6f60114c107c1232a0890a8fe997013c7e1920b6f0c811e05d2fae7" dependencies = [ "async-tls", "bytes 0.5.4", "either", "futures 0.3.4", "libp2p-core", - "log 0.4.8", + "log", "quicksink", "rustls 0.16.0", "rw-stream-sink", @@ -3027,9 +3014,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.16.2" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9dac30de24ccde0e67f363d71a125c587bbe6589503f664947e9b084b68a34f1" +checksum = "ee12c49426527908f81ffb6551b95f57149a8ea64f386bb7da3b123cdb9c01ba" dependencies = [ "futures 0.3.4", "libp2p-core", @@ -3094,21 +3081,32 @@ 
dependencies = [ ] [[package]] -name = "lock_api" -version = "0.3.3" +name = "linregress" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" +checksum = "9290cf6f928576eeb9c096c6fad9d8d452a0a1a70a2bbffa6e36064eedc0aac9" dependencies = [ - "scopeguard", + "failure", + "nalgebra", + "statrs", ] [[package]] -name = "log" -version = "0.3.9" +name = "lite-json" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faa835713bb12ba5204013497da16caf2dd2eee25ca829d0efaa054fb38c4ddd" +dependencies = [ + "paste", +] + +[[package]] +name = "lock_api" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e19e8d5c34a3e0e2223db8e060f9e8264aeeb5c5fc64a4ee9965c062211c024b" +checksum = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" dependencies = [ - "log 0.4.8", + "scopeguard", ] [[package]] @@ -3144,6 +3142,15 @@ version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08" +[[package]] +name = "matrixmultiply" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4f7ec66360130972f34830bfad9ef05c6610a43938a467bcc9ab9369ab3478f" +dependencies = [ + "rawpointer", +] + [[package]] name = "maybe-uninit" version = "2.0.0" @@ -3156,20 +3163,30 @@ version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi 0.3.8", +] + [[package]] name = "memoffset" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75189eb85871ea5c2e2c15abbdd541185f63b408415e5051f5cac122d8c774b9" +checksum = "b4fc2c02a7e374099d4ee95a193111f72d2110197fe200272371758f6c3643d8" dependencies = [ - "rustc_version", + "autocfg 1.0.0", ] [[package]] name = "memory-db" -version = "0.19.0" +version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "198831fe8722331a395bc199a5d08efbc197497ef354cb4c77b969c02ffc0fc4" +checksum = "f58381b20ebe2c578e75dececd9da411414903415349548ccc46aac3209cdfbc" dependencies = [ "ahash", "hash-db", @@ -3185,23 +3202,14 @@ checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" [[package]] name = "merlin" -version = "1.3.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0942b357c1b4d0dc43ba724674ec89c3218e6ca2b3e8269e7cb53bcecd2f6e" +checksum = "c6feca46f4fa3443a01769d768727f10c10a20fdb65e52dc16a81f0c8269bb78" dependencies = [ "byteorder 1.3.4", "keccak", - "rand_core 0.4.2", - "zeroize 1.1.0", -] - -[[package]] -name = "mime" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba626b8a6de5da682e1caa06bdb42a335aee5a84db8e5046a3e8ab17ba0a3ae0" -dependencies = [ - "log 0.3.9", + "rand_core 0.5.1", + "zeroize", ] [[package]] @@ -3225,7 +3233,7 @@ dependencies = [ "iovec", "kernel32-sys", "libc", - "log 0.4.8", + "log", "miow", "net2", "slab", @@ -3239,7 +3247,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" 
checksum = "52403fe290012ce777c4626790c8951324a2b9e3316b3143779c72b029742f19" dependencies = [ "lazycell", - "log 0.4.8", + "log", "mio", "slab", ] @@ -3273,51 +3281,65 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0debeb9fcf88823ea64d64e4a815ab1643f33127d995978e099942ce38f25238" +[[package]] +name = "multihash" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47fbc227f7e2b1cb701f95404579ecb2668abbdd3c7ef7a6cbb3cc0d3b236869" +dependencies = [ + "blake2b_simd", + "blake2s_simd", + "digest", + "sha-1", + "sha2", + "sha3", + "unsigned-varint", +] + [[package]] name = "multimap" -version = "0.8.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97fbd5d00e0e37bfb10f433af8f5aaf631e739368dc9fc28286ca81ca4948dc" +checksum = "d8883adfde9756c1d30b0f519c9b8c502a94b41ac62f696453c37c7fc0a958ce" [[package]] name = "multistream-select" -version = "0.7.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f938ffe420493e77c8b6cbcc3f282283f68fc889c5dcbc8e51668d5f3a01ad94" +checksum = "74cdcf7cfb3402881e15a1f95116cb033d69b33c83d481e1234777f5ef0c3d2c" dependencies = [ "bytes 0.5.4", - "futures 0.1.29", - "log 0.4.8", + "futures 0.3.4", + "log", + "pin-project", "smallvec 1.2.0", - "tokio-io", "unsigned-varint", ] [[package]] -name = "names" -version = "0.11.0" +name = "nalgebra" +version = "0.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef320dab323286b50fb5cdda23f61c796a72a89998ab565ca32525c5c556f2da" +checksum = "aaa9fddbc34c8c35dd2108515587b8ce0cab396f17977b8c738568e4edb521a2" dependencies = [ - "rand 0.3.23", + "alga", + "approx", + "generic-array", + "matrixmultiply", + "num-complex", + "num-rational", + "num-traits 0.2.11", + "rand 0.6.5", + "typenum", ] [[package]] -name = "native-tls" -version = "0.2.3" +name = "names" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b2df1a4c22fd44a62147fd8f13dd0f95c9d8ca7b2610299b2a2f9cf8964274e" +checksum = "ef320dab323286b50fb5cdda23f61c796a72a89998ab565ca32525c5c556f2da" dependencies = [ - "lazy_static", - "libc", - "log 0.4.8", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework 0.3.4", - "security-framework-sys 0.3.3", - "tempfile", + "rand 0.3.23", ] [[package]] @@ -3331,6 +3353,20 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "netstat2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29449d242064c48d3057a194b049a2bdcccadda16faa18a91468677b44e8d422" +dependencies = [ + "bitflags", + "byteorder 1.3.4", + "enum-primitive-derive", + "libc", + "num-traits 0.2.11", + "thiserror", +] + [[package]] name = "nix" version = "0.17.0" @@ -3344,9 +3380,24 @@ dependencies = [ "void", ] +[[package]] +name = "node-bench" +version = "0.8.0-alpha.5" +dependencies = [ + "log", + "node-primitives", + "node-testing", + "sc-cli", + "sc-client-api", + "serde", + "serde_json", + "sp-runtime", + "structopt", +] + [[package]] name = "node-cli" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "assert_cmd", "frame-benchmarking-cli", @@ -3355,7 +3406,7 @@ dependencies = [ "futures 0.3.4", "hex-literal", "jsonrpc-core", - "log 0.4.8", + "log", "nix", "node-executor", "node-inspect", @@ -3368,10 +3419,13 @@ dependencies = [ "pallet-contracts", "pallet-im-online", "pallet-indices", + 
"pallet-staking", "pallet-timestamp", "pallet-transaction-payment", "parity-scale-codec", + "platforms", "rand 0.7.3", + "regex", "sc-authority-discovery", "sc-basic-authorship", "sc-chain-spec", @@ -3410,14 +3464,13 @@ dependencies = [ "substrate-build-script-utils", "tempfile", "tracing", - "vergen", "wasm-bindgen", "wasm-bindgen-futures", ] [[package]] name = "node-executor" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "criterion 0.3.1", "frame-benchmarking", @@ -3451,10 +3504,10 @@ dependencies = [ [[package]] name = "node-inspect" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", - "log 0.4.8", + "log", "parity-scale-codec", "sc-cli", "sc-client-api", @@ -3467,7 +3520,7 @@ dependencies = [ [[package]] name = "node-primitives" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "pretty_assertions", "sp-core", @@ -3477,7 +3530,7 @@ dependencies = [ [[package]] name = "node-rpc" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "jsonrpc-core", "node-primitives", @@ -3500,20 +3553,20 @@ dependencies = [ [[package]] name = "node-rpc-client" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "env_logger 0.7.1", "futures 0.1.29", "hyper 0.12.35", "jsonrpc-core-client", - "log 0.4.8", + "log", "node-primitives", "sc-rpc", ] [[package]] name = "node-runtime" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-benchmarking", "frame-executive", @@ -3539,9 +3592,12 @@ dependencies = [ "pallet-indices", "pallet-membership", "pallet-offences", + "pallet-offences-benchmarking", "pallet-randomness-collective-flip", "pallet-recovery", + "pallet-scheduler", "pallet-session", + "pallet-session-benchmarking", "pallet-society", "pallet-staking", "pallet-staking-reward-curve", @@ -3553,7 +3609,6 @@ dependencies = [ "pallet-utility", "pallet-vesting", "parity-scale-codec", - "rustc-hex", "serde", "sp-api", "sp-authority-discovery", @@ -3575,10 +3630,10 @@ dependencies = [ [[package]] name = "node-template" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "futures 0.3.4", - "log 0.4.8", + "log", "node-template-runtime", "sc-basic-authorship", "sc-cli", @@ -3599,12 +3654,11 @@ dependencies = [ "sp-transaction-pool", "structopt", "substrate-build-script-utils", - "vergen", ] [[package]] name = "node-template-runtime" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-executive", "frame-support", @@ -3636,13 +3690,13 @@ dependencies = [ [[package]] name = "node-testing" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "criterion 0.3.1", "frame-support", "frame-system", "fs_extra", - "log 0.4.8", + "log", "node-executor", "node-primitives", "node-runtime", @@ -3682,9 +3736,9 @@ dependencies = [ [[package]] name = "node-transaction-factory" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ - "log 0.4.8", + "log", "parity-scale-codec", "sc-block-builder", "sc-cli", @@ -3713,12 +3767,12 @@ checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" [[package]] name = "nom" -version = "4.2.3" +version = "5.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad2a91a8e869eeb30b9cb3119ae87773a8f4ae617f41b1eb9c154b2905f7bd6" +checksum = "0b471253da97532da4b61552249c521e01e736071f71c1a4f7ebbfbf0a06aad6" dependencies = [ "memchr", - "version_check 0.1.5", + "version_check", ] [[package]] @@ -3738,7 +3792,17 @@ checksum = 
"090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" dependencies = [ "autocfg 1.0.0", "num-integer", - "num-traits", + "num-traits 0.2.11", +] + +[[package]] +name = "num-complex" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6b19411a9719e753aff12e5187b74d60d3dc449ec3f4dc21e3989c3f554bc95" +dependencies = [ + "autocfg 1.0.0", + "num-traits 0.2.11", ] [[package]] @@ -3748,19 +3812,28 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" dependencies = [ "autocfg 1.0.0", - "num-traits", + "num-traits 0.2.11", ] [[package]] name = "num-rational" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da4dc79f9e6c81bef96148c8f6b8e72ad4541caa4a24373e900a36da07de03a3" +checksum = "5c000134b5dbf44adc5cb772486d335293351644b801551abe8f75c84cfa4aef" dependencies = [ "autocfg 1.0.0", "num-bigint", "num-integer", - "num-traits", + "num-traits 0.2.11", +] + +[[package]] +name = "num-traits" +version = "0.1.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e5113e9fd4cc14ded8e499429f396a20f98c772a47cc8622a736e1ec843c31" +dependencies = [ + "num-traits 0.2.11", ] [[package]] @@ -3770,6 +3843,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" dependencies = [ "autocfg 1.0.0", + "libm", ] [[package]] @@ -3827,39 +3901,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" -[[package]] -name = "openssl" -version = "0.10.28" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "973293749822d7dd6370d6da1e523b0d1db19f06c459134c658b2a4261378b52" -dependencies = [ - "bitflags", - "cfg-if", - "foreign-types", - "lazy_static", - "libc", - "openssl-sys", -] - [[package]] name = "openssl-probe" version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77af24da69f9d9341038eba93a073b1fdaaa1b788221b00a69bce9e762cb32de" -[[package]] -name = "openssl-sys" -version = "0.9.54" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1024c0a59774200a555087a6da3f253a9095a5f344e353b212ac4c8b8e450986" -dependencies = [ - "autocfg 1.0.0", - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "output_vt100" version = "0.1.2" @@ -3880,7 +3927,7 @@ dependencies = [ [[package]] name = "pallet-assets" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -3894,7 +3941,7 @@ dependencies = [ [[package]] name = "pallet-aura" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -3916,7 +3963,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -3934,7 +3981,7 @@ dependencies = [ [[package]] name = "pallet-authorship" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -3950,18 +3997,16 @@ dependencies = [ [[package]] name = "pallet-babe" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", - "hex-literal", - "lazy_static", "pallet-session", 
"pallet-timestamp", "parity-scale-codec", - "parking_lot 0.10.0", "serde", "sp-consensus-babe", + "sp-consensus-vrf", "sp-core", "sp-inherents", "sp-io", @@ -3969,13 +4014,11 @@ dependencies = [ "sp-staking", "sp-std", "sp-timestamp", - "sp-version", - "substrate-test-runtime", ] [[package]] name = "pallet-balances" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-benchmarking", "frame-support", @@ -3991,7 +4034,7 @@ dependencies = [ [[package]] name = "pallet-benchmark" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-benchmarking", "frame-support", @@ -4005,8 +4048,9 @@ dependencies = [ [[package]] name = "pallet-collective" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "hex-literal", @@ -4021,7 +4065,7 @@ dependencies = [ [[package]] name = "pallet-contracts" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "assert_matches", "frame-support", @@ -4046,7 +4090,7 @@ dependencies = [ [[package]] name = "pallet-contracts-primitives" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -4055,7 +4099,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4074,7 +4118,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "pallet-contracts-primitives", "parity-scale-codec", @@ -4085,12 +4129,14 @@ dependencies = [ [[package]] name = "pallet-democracy" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "hex-literal", "pallet-balances", + "pallet-scheduler", "parity-scale-codec", "serde", "sp-core", @@ -4102,7 +4148,7 @@ dependencies = [ [[package]] name = "pallet-elections" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4118,12 +4164,13 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", "hex-literal", "pallet-balances", + "pallet-scheduler", "parity-scale-codec", "serde", "sp-core", @@ -4136,7 +4183,7 @@ dependencies = [ [[package]] name = "pallet-evm" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "evm", "frame-support", @@ -4156,7 +4203,7 @@ dependencies = [ [[package]] name = "pallet-example" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-benchmarking", "frame-support", @@ -4172,13 +4219,13 @@ dependencies = [ [[package]] name = "pallet-example-offchain-worker" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", + "lite-json", "parity-scale-codec", "serde", - "serde_json", "sp-core", "sp-io", "sp-runtime", @@ -4187,7 +4234,7 @@ dependencies = [ [[package]] name = "pallet-finality-tracker" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4204,7 +4251,7 @@ dependencies = [ [[package]] name = "pallet-generic-asset" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4218,7 +4265,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" 
dependencies = [ "frame-support", "frame-system", @@ -4236,7 +4283,7 @@ dependencies = [ [[package]] name = "pallet-identity" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4253,8 +4300,9 @@ dependencies = [ [[package]] name = "pallet-im-online" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "pallet-authorship", @@ -4271,7 +4319,7 @@ dependencies = [ [[package]] name = "pallet-indices" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4287,7 +4335,7 @@ dependencies = [ [[package]] name = "pallet-membership" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4301,7 +4349,7 @@ dependencies = [ [[package]] name = "pallet-nicks" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4316,7 +4364,7 @@ dependencies = [ [[package]] name = "pallet-offences" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4330,9 +4378,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-offences-benchmarking" +version = "2.0.0-alpha.5" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-im-online", + "pallet-offences", + "pallet-session", + "pallet-staking", + "parity-scale-codec", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", +] + [[package]] name = "pallet-randomness-collective-flip" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4346,7 +4412,7 @@ dependencies = [ [[package]] name = "pallet-recovery" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "enumflags2", "frame-support", @@ -4360,9 +4426,24 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-scheduler" +version = "2.0.0-alpha.5" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-scored-pool" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4377,7 +4458,7 @@ dependencies = [ [[package]] name = "pallet-session" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4395,15 +4476,35 @@ dependencies = [ "sp-trie", ] +[[package]] +name = "pallet-session-benchmarking" +version = "2.0.0-alpha.5" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "pallet-balances", + "pallet-session", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-society" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", "pallet-balances", "parity-scale-codec", - "rand_chacha 0.2.1", + "rand_chacha 0.2.2", "serde", "sp-core", "sp-io", @@ -4413,41 +4514,50 @@ dependencies = [ [[package]] name = "pallet-staking" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ + "env_logger 0.7.1", + "frame-benchmarking", "frame-support", "frame-system", + "hex", "pallet-authorship", "pallet-balances", + "pallet-indices", "pallet-session", "pallet-staking-reward-curve", "pallet-timestamp", 
"parity-scale-codec", + "parking_lot 0.10.0", + "rand 0.7.3", + "rand_chacha 0.2.2", "serde", + "sp-application-crypto", "sp-core", "sp-io", - "sp-keyring", "sp-phragmen", "sp-runtime", "sp-staking", "sp-std", + "sp-storage", + "static_assertions", "substrate-test-utils", ] [[package]] name = "pallet-staking-reward-curve" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote", + "quote 1.0.3", "sp-runtime", - "syn", + "syn 1.0.17", ] [[package]] name = "pallet-sudo" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4461,12 +4571,11 @@ dependencies = [ [[package]] name = "pallet-template" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", "parity-scale-codec", - "safe-mix", "sp-core", "sp-io", "sp-runtime", @@ -4474,7 +4583,7 @@ dependencies = [ [[package]] name = "pallet-timestamp" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-benchmarking", "frame-support", @@ -4492,7 +4601,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -4507,7 +4616,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4524,7 +4633,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "parity-scale-codec", @@ -4537,8 +4646,9 @@ dependencies = [ [[package]] name = "pallet-treasury" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", @@ -4552,8 +4662,9 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ + "frame-benchmarking", "frame-support", "frame-system", "pallet-balances", @@ -4567,7 +4678,7 @@ dependencies = [ [[package]] name = "pallet-vesting" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4584,12 +4695,6 @@ dependencies = [ "sp-storage", ] -[[package]] -name = "parity-bytes" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c276d76c5333b8c2579e02d49a06733a55b8282d2d9b13e8d53b6406bd7e30a" - [[package]] name = "parity-multiaddr" version = "0.7.3" @@ -4608,6 +4713,24 @@ dependencies = [ "url 2.1.1", ] +[[package]] +name = "parity-multiaddr" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4db35e222f783ef4e6661873f6c165c4eb7b65e0c408349818517d5705c2d7d3" +dependencies = [ + "arrayref", + "bs58", + "byteorder 1.3.4", + "data-encoding", + "multihash", + "percent-encoding 2.1.0", + "serde", + "static_assertions", + "unsigned-varint", + "url 2.1.1", +] + [[package]] name = "parity-multihash" version = "0.2.3" @@ -4625,9 +4748,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f509c5e67ca0605ee17dcd3f91ef41cadd685c75a298fb6261b781a5acb3f910" +checksum = "329c8f7f4244ddb5c37c103641027a76c530e65e8e4b8240b29f81ea40508b17" dependencies = [ "arrayvec 0.5.1", "bitvec", @@ -4644,8 +4767,8 @@ checksum = 
"5a0ec292e92e8ec7c58e576adacc1e3f399c597c8f263c42f18420abe58e7245" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -4656,9 +4779,9 @@ checksum = "aa9777aa91b8ad9dd5aaa04a9b6bcb02c7f1deb952fca5a66034d5e63afc5c6f" [[package]] name = "parity-util-mem" -version = "0.5.2" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9344bc978467339b9ae688f9dcf279d1aaa0ccfc88e9a780c729b765a82d57d5" +checksum = "6e42755f26e5ea21a6a819d9e63cbd70713e9867a2b767ec2cc65ca7659532c5" dependencies = [ "cfg-if", "ethereum-types", @@ -4679,7 +4802,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ "proc-macro2", - "syn", + "syn 1.0.17", "synstructure", ] @@ -4750,9 +4873,9 @@ dependencies = [ [[package]] name = "paste" -version = "0.1.7" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63e1afe738d71b1ebab5f1207c055054015427dbfc7bbe9ee1266894156ec046" +checksum = "ab4fb1930692d1b6a9cfabdde3d06ea0a7d186518e2f4d67660d8970e2fa647a" dependencies = [ "paste-impl", "proc-macro-hack", @@ -4760,14 +4883,14 @@ dependencies = [ [[package]] name = "paste-impl" -version = "0.1.7" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d4dc4a7f6f743211c5aab239640a65091535d97d43d92a52bca435a640892bb" +checksum = "a62486e111e571b1e93b710b61e8f493c0013be39629b714cb166bdb06aa5a8a" dependencies = [ "proc-macro-hack", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -4830,8 +4953,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -4858,6 +4981,12 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6" +[[package]] +name = "platforms" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feb3b2b1033b8a60b4da6ee470325f887758c95d5320f52f9ce0df055a55940e" + [[package]] name = "plotters" version = "0.2.12" @@ -4865,7 +4994,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4e3bb8da247d27ae212529352020f3e5ee16e83c0c258061d27b08ab92675eeb" dependencies = [ "js-sys", - "num-traits", + "num-traits 0.2.11", "wasm-bindgen", "web-sys", ] @@ -4878,9 +5007,9 @@ checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" [[package]] name = "predicates" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1188bf092c81c18228c383b190c069a8a613c18a046ffa9fdfc0f5fc8fb2da8a" +checksum = "347a1b6f0b21e636bc9872fb60b83b8e185f6f5516298b8238699f7f9a531030" dependencies = [ "difference", "predicates-core", @@ -4916,9 +5045,9 @@ dependencies = [ [[package]] name = "primitive-types" -version = "0.6.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4336f4f5d5524fa60bcbd6fe626f9223d8142a50e7053e979acdf0da41ab975" +checksum = "e5e4b9943a2da369aec5e96f7c10ebc74fcf434d39590d974b0a3460e6f67fbb" dependencies = [ "fixed-hash", "impl-codec", @@ -4938,68 +5067,78 @@ dependencies = [ [[package]] name = 
"proc-macro-error" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "052b3c9af39c7e5e94245f820530487d19eb285faedcb40e0c3275132293f242" +checksum = "18f33027081eba0a6d8aba6d1b1c3a3be58cbb12106341c2d5759fcd9b5277e7" dependencies = [ "proc-macro-error-attr", "proc-macro2", - "quote", - "rustversion", - "syn", + "quote 1.0.3", + "syn 1.0.17", + "version_check", ] [[package]] name = "proc-macro-error-attr" -version = "0.4.9" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d175bef481c7902e63e3165627123fff3502f06ac043d3ef42d08c1246da9253" +checksum = "8a5b4b77fdb63c1eca72173d68d24501c54ab1269409f6b672c85deb18af69de" dependencies = [ "proc-macro2", - "quote", - "rustversion", - "syn", + "quote 1.0.3", + "syn 1.0.17", "syn-mid", + "version_check", ] [[package]] name = "proc-macro-hack" -version = "0.5.11" +version = "0.5.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] +checksum = "0d659fe7c6d27f25e9d80a1a094c223f5246f6a6596453e09d7229bf42750b63" [[package]] name = "proc-macro-nested" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" +checksum = "8e946095f9d3ed29ec38de908c22f95d9ac008e424c7bcae54c75a79c527c694" [[package]] name = "proc-macro2" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" +checksum = "df246d292ff63439fea9bc8c0a270bed0e390d5ebd4db4ba15aba81111b5abe3" dependencies = [ - "unicode-xid", + "unicode-xid 0.2.0", ] [[package]] -name = "prometheus" -version = "0.7.0" +name = "procfs" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5567486d5778e2c6455b1b90ff1c558f29e751fc018130fa182e15828e728af1" +checksum = "fe50036aa1b71e553a4a0c48ab7baabf8aa8c7a5a61aae06bf38c2eab7430475" dependencies = [ - "cfg-if", + "bitflags", + "byteorder 1.3.4", + "chrono", + "hex", + "lazy_static", + "libc", + "libflate", +] + +[[package]] +name = "prometheus" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0575e258dab62268e7236d7307caa38848acbda7ec7ab87bd9093791e999d20" +dependencies = [ + "cfg-if", "fnv", "lazy_static", "protobuf", - "quick-error", "spin", + "thiserror", ] [[package]] @@ -5021,7 +5160,7 @@ dependencies = [ "bytes 0.5.4", "heck", "itertools", - "log 0.4.8", + "log", "multimap", "petgraph", "prost", @@ -5039,8 +5178,8 @@ dependencies = [ "anyhow", "itertools", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -5055,9 +5194,9 @@ dependencies = [ [[package]] name = "protobuf" -version = "2.10.1" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6686ddd96a8dbe2687b5f2a687b2cfb520854010ec480f2d74c32e7c9873d3c5" +checksum = "71964f34fd51cf04882d7ae3325fa0794d4cad66a03d0003f38d8ae4f63ba126" [[package]] name = "pwasm-utils" @@ -5066,7 +5205,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4f7a12f176deee919f4ba55326ee17491c8b707d0987aed822682c821b660192" dependencies = [ "byteorder 1.3.4", - "log 0.4.8", + "log", "parity-wasm 0.41.0", ] @@ -5083,22 +5222,28 @@ 
source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a44883e74aa97ad63db83c4bf8ca490f02b2fc02f92575e720c8551e843c945f" dependencies = [ "env_logger 0.7.1", - "log 0.4.8", + "log", "rand 0.7.3", "rand_core 0.5.1", ] [[package]] name = "quicksink" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8461ef7445f61fd72d8dcd0629ce724b9131b3c2eb36e83a5d3d4161c127530" +checksum = "77de3c815e5a160b1539c6592796801df2043ae35e123b46d73380cfa57af858" dependencies = [ "futures-core", "futures-sink", "pin-project-lite", ] +[[package]] +name = "quote" +version = "0.3.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a" + [[package]] name = "quote" version = "1.0.3" @@ -5108,6 +5253,12 @@ dependencies = [ "proc-macro2", ] +[[package]] +name = "radium" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "def50a86306165861203e7f84ecffbbdfdea79f0e51039b33de1e952358c47ac" + [[package]] name = "rand" version = "0.3.23" @@ -5131,6 +5282,19 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "rand" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c618c47cd3ebd209790115ab837de41425723956ad3ce2e6a7f09890947cacb9" +dependencies = [ + "cloudabi", + "fuchsia-cprng", + "libc", + "rand_core 0.3.1", + "winapi 0.3.8", +] + [[package]] name = "rand" version = "0.6.5" @@ -5158,7 +5322,7 @@ checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" dependencies = [ "getrandom", "libc", - "rand_chacha 0.2.1", + "rand_chacha 0.2.2", "rand_core 0.5.1", "rand_hc 0.2.0", ] @@ -5175,11 +5339,11 @@ dependencies = [ [[package]] name = "rand_chacha" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" dependencies = [ - "c2-chacha", + "ppv-lite86", "rand_core 0.5.1", ] @@ -5300,6 +5464,12 @@ dependencies = [ "rustc_version", ] +[[package]] +name = "rawpointer" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + [[package]] name = "rayon" version = "1.3.0" @@ -5352,9 +5522,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.3.4" +version = "1.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8" +checksum = "7f6946991529684867e47d86474e3a6d0c0ab9b82d5821e314b1ede31fa3a4b3" dependencies = [ "aho-corasick", "memchr", @@ -5364,18 +5534,18 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92b73c2a1770c255c240eaa4ee600df1704a38dc3feaa6e949e7fcd4f8dc09f9" +checksum = "ae1ded71d66a4a97f5e961fd0cb25a5f366a42a41570d16a763a69c092c26ae4" dependencies = [ "byteorder 1.3.4", ] [[package]] name = "regex-syntax" -version = "0.6.14" +version = "0.6.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b28dfe3fe9badec5dbf0a79a9cccad2cfc2ab5484bdb3e44cbd1ae8b3ba2be06" +checksum = "7fe5bd57d1d7414c6b5ed48563a2c855d995ff777729dcd91c369ec7fea395ae" [[package]] name = "region" @@ -5400,9 
+5570,9 @@ dependencies = [ [[package]] name = "ring" -version = "0.16.11" +version = "0.16.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "741ba1704ae21999c00942f9f5944f801e977f54302af346b596287599ad1862" +checksum = "1ba5a8ec64ee89a76c98c549af81ff14813df09c3e6dc4766c3856da48597a0c" dependencies = [ "cc", "lazy_static", @@ -5413,11 +5583,17 @@ dependencies = [ "winapi 0.3.8", ] +[[package]] +name = "rle-decode-fast" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cabe4fa914dec5870285fa7f71f602645da47c486e68486d2b4ceb4a343e90ac" + [[package]] name = "rlp" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a44d5ae8afcb238af8b75640907edc6c931efcfab2c854e81ed35fa080f84cd" +checksum = "4a7d3f9bed94764eac15b8f14af59fac420c236adaff743b7bcc88e265cb4345" dependencies = [ "rustc-hex", ] @@ -5488,7 +5664,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b25a18b1bf7387f0145e7f8324e700805aade3842dd3db2e74e4cdeb4677c09e" dependencies = [ "base64 0.10.1", - "log 0.4.8", + "log", "ring", "sct", "webpki", @@ -5501,7 +5677,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c0d4a31f5d68413404705d6982529b0e11a9aacd4839d1d6222ee3b8cb4015e1" dependencies = [ "base64 0.11.0", - "log 0.4.8", + "log", "ring", "sct", "webpki", @@ -5516,7 +5692,7 @@ dependencies = [ "openssl-probe", "rustls 0.17.0", "schannel", - "security-framework 0.4.1", + "security-framework", ] [[package]] @@ -5526,8 +5702,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -5543,9 +5719,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8506c1de11c9c4e4c38863ccbe02a305c8188e85a05a784c9e11e1c3910c8" +checksum = "535622e6be132bccd223f4bb2b8ac8d53cda3c7a6394944d3b2b33fb974f9d76" [[package]] name = "safe-mix" @@ -5556,12 +5732,6 @@ dependencies = [ "rustc_version", ] -[[package]] -name = "safemem" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef703b7cb59335eae2eb93ceb664c0eb7ea6bf567079d843e09420219668e072" - [[package]] name = "salsa20" version = "0.3.0" @@ -5593,7 +5763,7 @@ dependencies = [ [[package]] name = "sc-authority-discovery" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "bytes 0.5.4", "derive_more", @@ -5601,7 +5771,7 @@ dependencies = [ "futures 0.3.4", "futures-timer 3.0.2", "libp2p", - "log 0.4.8", + "log", "parity-scale-codec", "prost", "prost-build", @@ -5623,10 +5793,11 @@ dependencies = [ [[package]] name = "sc-basic-authorship" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "futures 0.3.4", - "log 0.4.8", + "futures-timer 3.0.2", + "log", "parity-scale-codec", "parking_lot 0.10.0", "sc-block-builder", @@ -5646,7 +5817,7 @@ dependencies = [ [[package]] name = "sc-block-builder" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -5663,7 +5834,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "impl-trait-for-tuples", "sc-chain-spec-derive", @@ -5671,23 +5842,24 @@ dependencies 
= [ "sc-telemetry", "serde", "serde_json", + "sp-chain-spec", "sp-core", "sp-runtime", ] [[package]] name = "sc-chain-spec-derive" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "proc-macro-crate", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "sc-cli" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "ansi_term 0.12.1", "app_dirs", @@ -5699,8 +5871,9 @@ dependencies = [ "fdlimit", "futures 0.3.4", "lazy_static", - "log 0.4.8", + "log", "names", + "nix", "parity-util-mem", "regex", "rpassword", @@ -5717,16 +5890,17 @@ dependencies = [ "sp-panic-handler", "sp-runtime", "sp-state-machine", + "sp-utils", "structopt", "substrate-prometheus-endpoint", "tempfile", "time", - "tokio 0.2.12", + "tokio 0.2.16", ] [[package]] name = "sc-client" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", "env_logger 0.7.1", @@ -5736,7 +5910,7 @@ dependencies = [ "hex-literal", "kvdb", "kvdb-memorydb", - "log 0.4.8", + "log", "parity-scale-codec", "parking_lot 0.10.0", "sc-block-builder", @@ -5755,6 +5929,7 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-trie", + "sp-utils", "sp-version", "substrate-prometheus-endpoint", "substrate-test-runtime-client", @@ -5764,7 +5939,7 @@ dependencies = [ [[package]] name = "sc-client-api" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "derive_more", "fnv", @@ -5772,7 +5947,8 @@ dependencies = [ "hash-db", "hex-literal", "kvdb", - "log 0.4.8", + "lazy_static", + "log", "parity-scale-codec", "parking_lot 0.10.0", "sc-executor", @@ -5791,12 +5967,13 @@ dependencies = [ "sp-test-primitives", "sp-transaction-pool", "sp-trie", + "sp-utils", "sp-version", ] [[package]] name = "sc-client-db" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "env_logger 0.7.1", "hash-db", @@ -5804,12 +5981,11 @@ dependencies = [ "kvdb-memorydb", "kvdb-rocksdb", "linked-hash-map", - "log 0.4.8", + "log", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.0", "quickcheck", - "rand 0.7.3", "sc-client", "sc-client-api", "sc-executor", @@ -5828,13 +6004,13 @@ dependencies = [ [[package]] name = "sc-consensus-aura" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", "env_logger 0.7.1", "futures 0.3.4", "futures-timer 3.0.2", - "log 0.4.8", + "log", "parity-scale-codec", "parking_lot 0.10.0", "sc-block-builder", @@ -5866,18 +6042,18 @@ dependencies = [ [[package]] name = "sc-consensus-babe" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", "env_logger 0.7.1", "fork-tree", "futures 0.3.4", "futures-timer 3.0.2", - "log 0.4.8", + "log", "merlin", "num-bigint", "num-rational", - "num-traits", + "num-traits 0.2.11", "parity-scale-codec", "parking_lot 0.10.0", "pdqselect", @@ -5902,6 +6078,7 @@ dependencies = [ "sp-blockchain", "sp-consensus", "sp-consensus-babe", + "sp-consensus-vrf", "sp-core", "sp-inherents", "sp-io", @@ -5915,7 +6092,7 @@ dependencies = [ [[package]] name = "sc-consensus-babe-rpc" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", "futures 0.3.4", @@ -5940,7 +6117,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "fork-tree", "parity-scale-codec", @@ -5952,15 +6129,16 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ + "assert_matches", 
"derive_more", "env_logger 0.7.1", "futures 0.3.4", "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", - "log 0.4.8", + "log", "parking_lot 0.10.0", "sc-basic-authorship", "sc-client", @@ -5975,16 +6153,16 @@ dependencies = [ "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", "tempfile", - "tokio 0.2.12", + "tokio 0.2.16", ] [[package]] name = "sc-consensus-pow" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", "futures 0.3.4", - "log 0.4.8", + "log", "parity-scale-codec", "sc-client-api", "sp-api", @@ -6000,11 +6178,11 @@ dependencies = [ [[package]] name = "sc-consensus-slots" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "futures 0.3.4", "futures-timer 3.0.2", - "log 0.4.8", + "log", "parity-scale-codec", "parking_lot 0.10.0", "sc-client-api", @@ -6021,9 +6199,9 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ - "log 0.4.8", + "log", "sc-client-api", "sp-authorship", "sp-consensus", @@ -6034,14 +6212,14 @@ dependencies = [ [[package]] name = "sc-executor" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "assert_matches", "derive_more", "hex-literal", "lazy_static", "libsecp256k1", - "log 0.4.8", + "log", "parity-scale-codec", "parity-wasm 0.41.0", "parking_lot 0.10.0", @@ -6068,11 +6246,12 @@ dependencies = [ [[package]] name = "sc-executor-common" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", - "log 0.4.8", + "log", "parity-scale-codec", + "parity-wasm 0.41.0", "sp-allocator", "sp-core", "sp-runtime-interface", @@ -6083,11 +6262,10 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ - "log 0.4.8", + "log", "parity-scale-codec", - "parity-wasm 0.41.0", "sc-executor-common", "sp-allocator", "sp-core", @@ -6098,10 +6276,12 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "assert_matches", - "log 0.4.8", + "cranelift-codegen", + "cranelift-wasm", + "log", "parity-scale-codec", "parity-wasm 0.41.0", "sc-executor-common", @@ -6110,12 +6290,14 @@ dependencies = [ "sp-core", "sp-runtime-interface", "sp-wasm-interface", - "wasmtime", + "substrate-wasmtime", + "substrate-wasmtime-runtime", + "wasmtime-environ", ] [[package]] name = "sc-finality-grandpa" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "assert_matches", "env_logger 0.7.1", @@ -6123,7 +6305,7 @@ dependencies = [ "fork-tree", "futures 0.3.4", "futures-timer 3.0.2", - "log 0.4.8", + "log", "parity-scale-codec", "parking_lot 0.10.0", "pin-project", @@ -6149,19 +6331,20 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-state-machine", + "sp-utils", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.2.12", + "tokio 0.2.16", ] [[package]] name = "sc-informant" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "ansi_term 0.12.1", "futures 0.3.4", - "log 0.4.8", + "log", "parity-util-mem", "sc-client-api", "sc-network", @@ -6173,7 +6356,7 @@ dependencies = [ [[package]] name = "sc-keystore" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "derive_more", "hex", @@ -6188,7 +6371,7 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "assert_matches", "async-std", @@ -6203,10 
+6386,11 @@ dependencies = [ "futures 0.3.4", "futures-timer 3.0.2", "futures_codec", + "hex", "libp2p", "linked-hash-map", "linked_hash_set", - "log 0.4.8", + "log", "lru", "nohash-hasher", "parity-scale-codec", @@ -6216,7 +6400,6 @@ dependencies = [ "prost-build", "quickcheck", "rand 0.7.3", - "rustc-hex", "sc-block-builder", "sc-client", "sc-client-api", @@ -6234,6 +6417,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-test-primitives", + "sp-utils", "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", @@ -6242,20 +6426,22 @@ dependencies = [ "unsigned-varint", "void", "wasm-timer", - "zeroize 1.1.0", + "zeroize", ] [[package]] name = "sc-network-gossip" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "futures 0.3.4", "futures-timer 3.0.2", "libp2p", - "log 0.4.8", + "log", "lru", "sc-network", "sp-runtime", + "sp-utils", + "substrate-test-runtime-client", "wasm-timer", ] @@ -6267,7 +6453,7 @@ dependencies = [ "futures 0.3.4", "futures-timer 3.0.2", "libp2p", - "log 0.4.8", + "log", "parking_lot 0.10.0", "rand 0.7.3", "sc-block-builder", @@ -6286,7 +6472,7 @@ dependencies = [ [[package]] name = "sc-offchain" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "bytes 0.5.4", "env_logger 0.7.1", @@ -6294,9 +6480,9 @@ dependencies = [ "fnv", "futures 0.3.4", "futures-timer 3.0.2", - "hyper 0.13.2", + "hyper 0.13.4", "hyper-rustls", - "log 0.4.8", + "log", "num_cpus", "parity-scale-codec", "parking_lot 0.10.0", @@ -6311,26 +6497,28 @@ dependencies = [ "sp-offchain", "sp-runtime", "sp-transaction-pool", + "sp-utils", "substrate-test-runtime-client", "threadpool", - "tokio 0.2.12", + "tokio 0.2.16", ] [[package]] name = "sc-peerset" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "futures 0.3.4", "libp2p", - "log 0.4.8", + "log", "rand 0.7.3", "serde_json", + "sp-utils", "wasm-timer", ] [[package]] name = "sc-rpc" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "assert_matches", "futures 0.1.29", @@ -6338,10 +6526,9 @@ dependencies = [ "hash-db", "jsonrpc-core", "jsonrpc-pubsub", - "log 0.4.8", + "log", "parity-scale-codec", "parking_lot 0.10.0", - "rustc-hex", "sc-block-builder", "sc-client", "sc-client-api", @@ -6353,6 +6540,7 @@ dependencies = [ "serde_json", "sp-api", "sp-blockchain", + "sp-chain-spec", "sp-core", "sp-io", "sp-offchain", @@ -6361,6 +6549,7 @@ dependencies = [ "sp-session", "sp-state-machine", "sp-transaction-pool", + "sp-utils", "sp-version", "substrate-test-runtime-client", "tokio 0.1.22", @@ -6368,7 +6557,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", "futures 0.3.4", @@ -6376,11 +6565,12 @@ dependencies = [ "jsonrpc-core-client", "jsonrpc-derive", "jsonrpc-pubsub", - "log 0.4.8", + "log", "parity-scale-codec", "parking_lot 0.10.0", "serde", "serde_json", + "sp-chain-spec", "sp-core", "sp-rpc", "sp-runtime", @@ -6390,13 +6580,13 @@ dependencies = [ [[package]] name = "sc-rpc-server" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "jsonrpc-core", "jsonrpc-http-server", "jsonrpc-pubsub", "jsonrpc-ws-server", - "log 0.4.8", + "log", "serde", "serde_json", "sp-runtime", @@ -6417,7 +6607,7 @@ dependencies = [ [[package]] name = "sc-service" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", "exit-future", @@ -6426,11 +6616,13 @@ dependencies = [ "futures-diagnose", "futures-timer 3.0.2", 
"lazy_static", - "log 0.4.8", - "parity-multiaddr", + "log", + "netstat2", + "parity-multiaddr 0.7.3", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.0", + "procfs", "sc-chain-spec", "sc-client", "sc-client-api", @@ -6459,10 +6651,10 @@ dependencies = [ "sp-runtime", "sp-session", "sp-transaction-pool", + "sp-utils", "substrate-prometheus-endpoint", "substrate-test-runtime-client", "sysinfo", - "target_info", "tracing", "wasm-timer", ] @@ -6475,7 +6667,7 @@ dependencies = [ "fdlimit", "futures 0.1.29", "futures 0.3.4", - "log 0.4.8", + "log", "sc-client", "sc-network", "sc-service", @@ -6489,10 +6681,10 @@ dependencies = [ [[package]] name = "sc-state-db" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "env_logger 0.7.1", - "log 0.4.8", + "log", "parity-scale-codec", "parity-util-mem", "parity-util-mem-derive", @@ -6503,13 +6695,13 @@ dependencies = [ [[package]] name = "sc-telemetry" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "bytes 0.5.4", "futures 0.3.4", "futures-timer 3.0.2", "libp2p", - "log 0.4.8", + "log", "parking_lot 0.10.0", "pin-project", "rand 0.7.3", @@ -6524,10 +6716,10 @@ dependencies = [ [[package]] name = "sc-tracing" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "erased-serde", - "log 0.4.8", + "log", "parking_lot 0.10.0", "sc-telemetry", "serde", @@ -6539,14 +6731,14 @@ dependencies = [ [[package]] name = "sc-transaction-graph" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "assert_matches", "criterion 0.3.1", "derive_more", "futures 0.3.4", "linked-hash-map", - "log 0.4.8", + "log", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.0", @@ -6555,19 +6747,22 @@ dependencies = [ "sp-core", "sp-runtime", "sp-transaction-pool", + "sp-utils", "substrate-test-runtime", "wasm-timer", ] [[package]] name = "sc-transaction-pool" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ + "assert_matches", "derive_more", "futures 0.3.4", "futures-diagnose", - "futures-timer 2.0.2", - "log 0.4.8", + "hex", + "intervalier", + "log", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.0", @@ -6579,6 +6774,7 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-transaction-pool", + "sp-utils", "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", "wasm-timer", @@ -6586,9 +6782,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "507a9e6e8ffe0a4e0ebb9a10293e62fdf7657c06f1b8bb07a8fcf697d2abf295" +checksum = "039c25b130bd8c1321ee2d7de7fde2659fa9c2744e4bb29711cfc852ea53cd19" dependencies = [ "lazy_static", "winapi 0.3.8", @@ -6596,19 +6792,20 @@ dependencies = [ [[package]] name = "schnorrkel" -version = "0.8.5" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eacd8381b3c37840c9c9f40472af529e49975bdcbc24f83c31059fd6539023d3" +checksum = "021b403afe70d81eea68f6ea12f6b3c9588e5d536a94c3bf80f15e7faa267862" dependencies = [ - "curve25519-dalek 1.2.3", - "failure", + "arrayref", + "arrayvec 0.5.1", + "curve25519-dalek", + "getrandom", "merlin", - "rand 0.6.5", - "rand_core 0.4.2", - "rand_os", + "rand 0.7.3", + "rand_core 0.5.1", "sha2", "subtle 2.2.2", - "zeroize 0.9.3", + "zeroize", ] [[package]] @@ -6639,8 +6836,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" dependencies = 
[ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -6655,44 +6852,24 @@ dependencies = [ [[package]] name = "security-framework" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ef2429d7cefe5fd28bd1d2ed41c944547d4ff84776f5935b456da44593a16df" -dependencies = [ - "core-foundation 0.6.4", - "core-foundation-sys 0.6.2", - "libc", - "security-framework-sys 0.3.3", -] - -[[package]] -name = "security-framework" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97bbedbe81904398b6ebb054b3e912f99d55807125790f3198ac990d98def5b0" +checksum = "572dfa3a0785509e7a44b5b4bebcf94d41ba34e9ed9eb9df722545c3b3c4144a" dependencies = [ "bitflags", - "core-foundation 0.7.0", - "core-foundation-sys 0.7.0", - "security-framework-sys 0.4.1", -] - -[[package]] -name = "security-framework-sys" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e31493fc37615debb8c5090a7aeb4a9730bc61e77ab10b9af59f1a202284f895" -dependencies = [ - "core-foundation-sys 0.6.2", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06fd2f23e31ef68dd2328cc383bd493142e46107a3a0e24f7d734e3f3b80fe4c" +checksum = "8ddb15a5fec93b7021b8a9e96009c5d8d51c15673569f7c0f6b7204e5b7b404f" dependencies = [ - "core-foundation-sys 0.7.0", + "core-foundation-sys", "libc", ] @@ -6741,29 +6918,29 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "serde" -version = "1.0.104" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" +checksum = "36df6ac6412072f67cf767ebbde4133a5b2e88e76dc6187fa7104cd16f783399" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.104" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" +checksum = "9e549e3abf4fb8621bd1609f11dfc9f5e50320802273b12f3811a67e6716ea6c" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "serde_json" -version = "1.0.48" +version = "1.0.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" +checksum = "da07b57ee2623368351e9a0488bb0b261322a15a6e0ae53e243cbdc0f4208da9" dependencies = [ "itoa", "ryu", @@ -6885,8 +7062,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a945ec7f7ce853e89ffa36be1e27dce9a43e82ff9093bf3461c30d5da74ed11b" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -6932,9 +7109,9 @@ dependencies = [ "bytes 0.5.4", "flate2", "futures 0.3.4", - "http 0.2.0", + "http 0.2.1", "httparse", - "log 0.4.8", + "log", "rand 0.7.3", "sha1", "smallvec 1.2.0", @@ -6942,18 +7119,12 @@ dependencies = [ "thiserror", ] -[[package]] -name = "sourcefile" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" - [[package]] name = "sp-allocator" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" 
dependencies = [ "derive_more", - "log 0.4.8", + "log", "sp-core", "sp-std", "sp-wasm-interface", @@ -6961,7 +7132,7 @@ dependencies = [ [[package]] name = "sp-api" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "hash-db", "parity-scale-codec", @@ -6976,13 +7147,13 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "blake2-rfc", "proc-macro-crate", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -7006,7 +7177,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "serde", @@ -7028,11 +7199,11 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "criterion 0.3.1", "integer-sqrt", - "num-traits", + "num-traits 0.2.11", "parity-scale-codec", "primitive-types", "rand 0.7.3", @@ -7041,9 +7212,20 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-arithmetic-fuzzer" +version = "2.0.0-alpha.5" +dependencies = [ + "honggfuzz", + "num-bigint", + "num-traits 0.2.11", + "primitive-types", + "sp-arithmetic", +] + [[package]] name = "sp-authority-discovery" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "sp-api", @@ -7054,7 +7236,7 @@ dependencies = [ [[package]] name = "sp-authorship" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7064,7 +7246,7 @@ dependencies = [ [[package]] name = "sp-block-builder" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "sp-api", @@ -7075,10 +7257,10 @@ dependencies = [ [[package]] name = "sp-blockchain" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "derive_more", - "log 0.4.8", + "log", "lru", "parity-scale-codec", "parking_lot 0.10.0", @@ -7088,16 +7270,24 @@ dependencies = [ "sp-state-machine", ] +[[package]] +name = "sp-chain-spec" +version = "2.0.0-alpha.5" +dependencies = [ + "serde", + "serde_json", +] + [[package]] name = "sp-consensus" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "derive_more", "futures 0.3.4", "futures-diagnose", "futures-timer 3.0.2", "libp2p", - "log 0.4.8", + "log", "parity-scale-codec", "parking_lot 0.10.0", "serde", @@ -7107,12 +7297,13 @@ dependencies = [ "sp-state-machine", "sp-std", "sp-test-primitives", + "sp-utils", "sp-version", ] [[package]] name = "sp-consensus-aura" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "parity-scale-codec", "sp-api", @@ -7125,13 +7316,13 @@ dependencies = [ [[package]] name = "sp-consensus-babe" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "parity-scale-codec", - "schnorrkel", "sp-api", "sp-application-crypto", "sp-consensus", + "sp-consensus-vrf", "sp-inherents", "sp-runtime", "sp-std", @@ -7140,7 +7331,7 @@ dependencies = [ [[package]] name = "sp-consensus-pow" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "parity-scale-codec", "sp-api", @@ -7149,15 +7340,27 @@ dependencies = [ "sp-std", ] +[[package]] +name = "sp-consensus-vrf" +version = "0.8.0-alpha.5" +dependencies = [ + "parity-scale-codec", + "schnorrkel", + "sp-core", + "sp-runtime", + "sp-std", +] + [[package]] name = "sp-core" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "base58", "blake2-rfc", "byteorder 1.3.4", "criterion 
0.2.11", "ed25519-dalek", + "futures 0.3.4", "hash-db", "hash256-std-hasher", "hex", @@ -7165,8 +7368,8 @@ dependencies = [ "impl-serde 0.3.0", "lazy_static", "libsecp256k1", - "log 0.4.8", - "num-traits", + "log", + "num-traits 0.2.11", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.0", @@ -7174,7 +7377,6 @@ dependencies = [ "primitive-types", "rand 0.7.3", "regex", - "rustc-hex", "schnorrkel", "serde", "serde_json", @@ -7187,24 +7389,24 @@ dependencies = [ "sp-storage", "substrate-bip39", "tiny-bip39", - "tiny-keccak 2.0.1", + "tiny-keccak 2.0.2", "twox-hash", "wasmi", - "zeroize 1.1.0", + "zeroize", ] [[package]] name = "sp-debug-derive" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "sp-externalities" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "environmental", "sp-std", @@ -7213,7 +7415,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "serde", @@ -7225,7 +7427,7 @@ dependencies = [ [[package]] name = "sp-finality-tracker" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7234,7 +7436,7 @@ dependencies = [ [[package]] name = "sp-inherents" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "derive_more", "parity-scale-codec", @@ -7245,11 +7447,11 @@ dependencies = [ [[package]] name = "sp-io" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "hash-db", "libsecp256k1", - "log 0.4.8", + "log", "parity-scale-codec", "sp-core", "sp-externalities", @@ -7262,7 +7464,7 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "lazy_static", "sp-core", @@ -7272,7 +7474,7 @@ dependencies = [ [[package]] name = "sp-offchain" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "sp-api", "sp-runtime", @@ -7280,27 +7482,39 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "backtrace", - "log 0.4.8", + "log", ] [[package]] name = "sp-phragmen" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ + "parity-scale-codec", "rand 0.7.3", "serde", - "sp-io", + "sp-phragmen", + "sp-phragmen-compact", "sp-runtime", "sp-std", "substrate-test-utils", ] +[[package]] +name = "sp-phragmen-compact" +version = "2.0.0-dev" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote 1.0.3", + "syn 1.0.17", +] + [[package]] name = "sp-rpc" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "serde", "serde_json", @@ -7309,11 +7523,11 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "hash256-std-hasher", "impl-trait-for-tuples", - "log 0.4.8", + "log", "parity-scale-codec", "parity-util-mem", "paste", @@ -7330,7 +7544,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "primitive-types", @@ -7349,13 +7563,13 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "Inflector", "proc-macro-crate", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -7367,6 +7581,7 @@ 
dependencies = [ "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-test-wasm", + "sp-runtime-interface-test-wasm-deprecated", "sp-state-machine", ] @@ -7381,9 +7596,20 @@ dependencies = [ "substrate-wasm-builder-runner", ] +[[package]] +name = "sp-runtime-interface-test-wasm-deprecated" +version = "2.0.0-dev" +dependencies = [ + "sp-core", + "sp-io", + "sp-runtime-interface", + "sp-std", + "substrate-wasm-builder-runner", +] + [[package]] name = "sp-sandbox" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "assert_matches", "parity-scale-codec", @@ -7397,7 +7623,7 @@ dependencies = [ [[package]] name = "sp-serializer" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "serde", "serde_json", @@ -7405,7 +7631,7 @@ dependencies = [ [[package]] name = "sp-session" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "sp-api", "sp-core", @@ -7415,7 +7641,7 @@ dependencies = [ [[package]] name = "sp-staking" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -7424,12 +7650,12 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "hash-db", "hex-literal", - "log 0.4.8", - "num-traits", + "log", + "num-traits 0.2.11", "parity-scale-codec", "parking_lot 0.10.0", "rand 0.7.3", @@ -7444,11 +7670,11 @@ dependencies = [ [[package]] name = "sp-std" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" [[package]] name = "sp-storage" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "impl-serde 0.2.3", "serde", @@ -7470,7 +7696,7 @@ dependencies = [ [[package]] name = "sp-timestamp" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -7483,20 +7709,21 @@ dependencies = [ [[package]] name = "sp-transaction-pool" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "derive_more", "futures 0.3.4", - "log 0.4.8", + "log", "parity-scale-codec", "serde", "sp-api", "sp-runtime", + "sp-utils", ] [[package]] name = "sp-trie" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "criterion 0.2.11", "hash-db", @@ -7512,9 +7739,19 @@ dependencies = [ "trie-standardmap", ] +[[package]] +name = "sp-utils" +version = "2.0.0-alpha.5" +dependencies = [ + "futures 0.3.4", + "futures-core", + "lazy_static", + "prometheus", +] + [[package]] name = "sp-version" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "impl-serde 0.2.3", "parity-scale-codec", @@ -7525,7 +7762,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -7551,6 +7788,15 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "statrs" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "10102ac8d55e35db2b3fafc26f81ba8647da2e15879ab686a67e6d19af2685e8" +dependencies = [ + "rand 0.5.6", +] + [[package]] name = "stream-cipher" version = "0.3.2" @@ -7586,9 +7832,9 @@ checksum = "8ea5119cdb4c55b55d432abb513a0429384878c15dde60cc77b1c99de1a95a6a" [[package]] name = "structopt" -version = "0.3.9" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a1bcbed7d48956fcbb5d80c6b95aedb553513de0a1b451ea92679d999c010e98" +checksum = "c8faa2719539bbe9d77869bfb15d4ee769f99525e707931452c97b693b3f159d" dependencies = [ "clap", "lazy_static", @@ -7597,15 +7843,15 @@ dependencies = [ [[package]] name = "structopt-derive" -version = "0.4.2" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "095064aa1f5b94d14e635d0a5684cf140c43ae40a0fd990708d38f5d669e5f64" +checksum = "3f88b8e18c69496aad6f9ddf4630dd7d585bcaf765786cb415b9aec2fe5a0430" dependencies = [ "heck", "proc-macro-error", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -7625,13 +7871,13 @@ checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" dependencies = [ "heck", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] name = "subkey" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "clap", "derive_more", @@ -7650,7 +7896,6 @@ dependencies = [ "parity-scale-codec", "rand 0.7.3", "rpassword", - "rustc-hex", "sc-rpc", "serde_json", "sp-core", @@ -7661,9 +7906,9 @@ dependencies = [ [[package]] name = "substrate-bip39" -version = "0.3.1" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be511be555a3633e71739a79e4ddff6a6aaa6579fa6114182a51d72c3eb93c5" +checksum = "c004e8166d6e0aa3a9d5fa673e5b7098ff25f930de1013a21341988151e681bb" dependencies = [ "hmac", "pbkdf2", @@ -7673,7 +7918,7 @@ dependencies = [ [[package]] name = "substrate-browser-utils" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "chrono", "clear_on_drop", @@ -7685,7 +7930,7 @@ dependencies = [ "js-sys", "kvdb-web", "libp2p-wasm-ext", - "log 0.4.8", + "log", "rand 0.6.5", "rand 0.7.3", "sc-chain-spec", @@ -7698,11 +7943,14 @@ dependencies = [ [[package]] name = "substrate-build-script-utils" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" +dependencies = [ + "platforms", +] [[package]] name = "substrate-frame-rpc-support" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "frame-support", "frame-system", @@ -7713,12 +7961,12 @@ dependencies = [ "sc-rpc-api", "serde", "sp-storage", - "tokio 0.2.12", + "tokio 0.2.16", ] [[package]] name = "substrate-frame-rpc-system" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" dependencies = [ "env_logger 0.7.1", "frame-system-rpc-runtime-api", @@ -7726,7 +7974,7 @@ dependencies = [ "jsonrpc-core", "jsonrpc-core-client", "jsonrpc-derive", - "log 0.4.8", + "log", "parity-scale-codec", "sc-client", "sc-transaction-pool", @@ -7741,15 +7989,15 @@ dependencies = [ [[package]] name = "substrate-prometheus-endpoint" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" dependencies = [ "async-std", "derive_more", "futures-util", - "hyper 0.13.2", - "log 0.4.8", + "hyper 0.13.4", + "log", "prometheus", - "tokio 0.2.12", + "tokio 0.2.16", ] [[package]] @@ -7780,7 +8028,7 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "log 0.4.8", + "log", "memory-db", "pallet-babe", "pallet-timestamp", @@ -7847,7 +8095,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" [[package]] name = "substrate-wasm-builder" @@ -7868,6 +8116,75 @@ dependencies = [ name = "substrate-wasm-builder-runner" version = "1.0.5" +[[package]] +name = "substrate-wasmtime" +version = "0.13.0-threadsafe.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"9e512629525ecfe43bffe1f3d9e6bb0f08bf01155288ef27fcaae4ea086e4a9d" +dependencies = [ + "anyhow", + "backtrace", + "cfg-if", + "lazy_static", + "libc", + "region", + "rustc-demangle", + "substrate-wasmtime-jit", + "substrate-wasmtime-runtime", + "target-lexicon", + "wasmparser", + "wasmtime-environ", + "wasmtime-profiling", + "wat", + "winapi 0.3.8", +] + +[[package]] +name = "substrate-wasmtime-jit" +version = "0.13.0-threadsafe.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a20de5564886d2bcffdd351c9cd114ceb50758aa58eac3cedb14faabf7f93b91" +dependencies = [ + "anyhow", + "cfg-if", + "cranelift-codegen", + "cranelift-entity", + "cranelift-frontend", + "cranelift-native", + "cranelift-wasm", + "more-asserts", + "region", + "substrate-wasmtime-runtime", + "target-lexicon", + "thiserror", + "wasmparser", + "wasmtime-debug", + "wasmtime-environ", + "wasmtime-profiling", + "winapi 0.3.8", +] + +[[package]] +name = "substrate-wasmtime-runtime" +version = "0.13.0-threadsafe.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d08846f04293a7fc27eeb30f06262ca2e1b4ee20f5192cec1f3ce201e08ceb8" +dependencies = [ + "backtrace", + "cc", + "cfg-if", + "indexmap", + "lazy_static", + "libc", + "memoffset", + "more-asserts", + "region", + "thiserror", + "wasmtime-environ", + "wasmtime-profiling", + "winapi 0.3.8", +] + [[package]] name = "subtle" version = "1.0.0" @@ -7882,13 +8199,24 @@ checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" [[package]] name = "syn" -version = "1.0.16" +version = "0.11.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" +checksum = "d3b891b9015c88c576343b9b3e41c2c11a51c219ef067b264bd9c8aa9b441dad" +dependencies = [ + "quote 0.3.15", + "synom", + "unicode-xid 0.0.4", +] + +[[package]] +name = "syn" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0df0eb663f387145cab623dea85b09c2c5b4b0aef44e945d928e682fce71bb03" dependencies = [ "proc-macro2", - "quote", - "unicode-xid", + "quote 1.0.3", + "unicode-xid 0.2.0", ] [[package]] @@ -7898,8 +8226,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", +] + +[[package]] +name = "synom" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a393066ed9010ebaed60b9eafa373d4b1baac186dd7e008555b0f702b51945b6" +dependencies = [ + "unicode-xid 0.0.4", ] [[package]] @@ -7909,16 +8246,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ "proc-macro2", - "quote", - "syn", - "unicode-xid", + "quote 1.0.3", + "syn 1.0.17", + "unicode-xid 0.2.0", ] [[package]] name = "sysinfo" -version = "0.11.7" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e15d793f059727ad34a9245503c13b38262bb32e9906d8122ca64d6ca54b0858" +checksum = "7ccb41798287e8e299a701b5560d886d6ca2c3e7115e9ea2cb68c123aec339b7" dependencies = [ "cfg-if", "doc-comment", @@ -7941,12 +8278,6 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab0e7238dcc7b40a7be719a25365910f6807bd864f4cce6b2e6b873658e2b19d" -[[package]] -name = 
"target_info" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" - [[package]] name = "tempfile" version = "3.1.0" @@ -7978,9 +8309,9 @@ checksum = "a605baa797821796a751f4a959e1206079b24a4b7e1ed302b7d785d81a9276c9" dependencies = [ "lazy_static", "proc-macro2", - "quote", - "syn", - "version_check 0.9.1", + "quote 1.0.3", + "syn 1.0.17", + "version_check", ] [[package]] @@ -7994,22 +8325,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee14bf8e6767ab4c687c9e8bc003879e042a96fd67a3ba5934eadb6536bef4db" +checksum = "f0570dc61221295909abdb95c739f2e74325e14293b2026b0a7e195091ec54ae" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.11" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7b51e1fbc44b5a0840be594fbc0f960be09050f2617e61e6aa43bef97cd3ef4" +checksum = "227362df41d566be41a28f64401e07a043157c21c14b9785a0d8e256f940a8fd" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -8043,9 +8374,9 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6848cd8f566953ce1e8faeba12ee23cbdbb0437754792cd857d44628b5685e3" +checksum = "b0165e045cc2ae1660270ca65e1676dbaab60feb0f91b10f7d0665e9b47e31f2" dependencies = [ "failure", "hmac", @@ -8068,9 +8399,9 @@ dependencies = [ [[package]] name = "tiny-keccak" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2953ca5148619bc99695c1274cb54c5275bbb913c6adad87e72eaf8db9787f69" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" dependencies = [ "crunchy", ] @@ -8111,12 +8442,13 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.12" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b34bee1facdc352fba10c9c58b654e6ecb6a2250167772bf86071f7c5f2f5061" +checksum = "ee5a0dd887e37d37390c13ff8ac830f992307fe30a1fff0ab8427af67211ba28" dependencies = [ "bytes 0.5.4", "fnv", + "futures-core", "iovec", "lazy_static", "libc", @@ -8203,7 +8535,7 @@ checksum = "57fc868aae093479e3131e3d165c93b1c7474109d13c90ec0dda2a1bbfff0674" dependencies = [ "bytes 0.4.12", "futures 0.1.29", - "log 0.4.8", + "log", ] [[package]] @@ -8213,8 +8545,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -8226,7 +8558,7 @@ dependencies = [ "crossbeam-utils", "futures 0.1.29", "lazy_static", - "log 0.4.8", + "log", "mio", "num_cpus", "parking_lot 0.9.0", @@ -8244,7 +8576,7 @@ checksum = "4adb8b3e5f86b707f1b54e7c15b6de52617a823608ccda98a15d3a24222f265a" dependencies = [ "futures-core", "rustls 0.17.0", - "tokio 0.2.12", + "tokio 0.2.16", "webpki", ] @@ -8294,7 +8626,7 @@ dependencies = [ "crossbeam-utils", "futures 0.1.29", "lazy_static", - "log 0.4.8", + "log", "num_cpus", "slab", "tokio-executor 0.1.10", @@ -8312,17 +8644,6 @@ dependencies = [ "tokio-executor 0.1.10", ] -[[package]] -name = "tokio-tls" -version = "0.2.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "354b8cd83825b3c20217a9dc174d6a0c67441a2fae5c41bcb1ea6679f6ae0f7c" -dependencies = [ - "futures 0.1.29", - "native-tls", - "tokio-io", -] - [[package]] name = "tokio-udp" version = "0.1.6" @@ -8331,7 +8652,7 @@ checksum = "e2a0b10e610b39c38b031a2fcab08e4b82f16ece36504988dcbd81dbba650d82" dependencies = [ "bytes 0.4.12", "futures 0.1.29", - "log 0.4.8", + "log", "mio", "tokio-codec", "tokio-io", @@ -8348,7 +8669,7 @@ dependencies = [ "futures 0.1.29", "iovec", "libc", - "log 0.4.8", + "log", "mio", "mio-uds", "tokio-codec", @@ -8358,16 +8679,16 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.2.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "571da51182ec208780505a32528fc5512a8fe1443ab960b3f2f3ef093cd16930" +checksum = "be8242891f2b6cbef26a2d7e8605133c2c554cd35b3e4948ea892d6d68436499" dependencies = [ "bytes 0.5.4", "futures-core", "futures-sink", - "log 0.4.8", + "log", "pin-project-lite", - "tokio 0.2.12", + "tokio 0.2.16", ] [[package]] @@ -8402,8 +8723,8 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7fbad39da2f9af1cae3016339ad7f2c7a9e870f12e8fd04c4fd7ef35b30c0d2b" dependencies = [ - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", ] [[package]] @@ -8415,12 +8736,6 @@ dependencies = [ "lazy_static", ] -[[package]] -name = "traitobject" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "efd1f82c56340fdf16f2a953d7bda4f8fdffba13d93b00844c25572110b26079" - [[package]] name = "treeline" version = "0.1.0" @@ -8429,9 +8744,9 @@ checksum = "a7f741b240f1a48843f9b8e0444fb55fb2a4ff67293b50a9179dfd5ea67f8d41" [[package]] name = "trie-bench" -version = "0.20.0" +version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dcd9bac85703d8f974ee1e6dfe668784b105d3385c174ad729adb7427ad5d81" +checksum = "f105ed33e42b534284b691e804e909c42a8898afcf22896a32255c05a1a50488" dependencies = [ "criterion 0.2.11", "hash-db", @@ -8451,7 +8766,7 @@ checksum = "de9222c50cc325855621271157c973da27a0dcd26fa06f8edf81020bd2333df0" dependencies = [ "hash-db", "hashbrown", - "log 0.4.8", + "log", "rustc-hex", "smallvec 1.2.0", ] @@ -8483,9 +8798,9 @@ checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" [[package]] name = "trybuild" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26ff1b18659a2218332848d76ad1c867ce4c6ee37b085e6bc8de9a6d11401220" +checksum = "24b4e093c5ed1a60b22557090120aa14f90ca801549c0949d775ea07c1407720" dependencies = [ "glob 0.3.0", "lazy_static", @@ -8515,12 +8830,6 @@ dependencies = [ "rand 0.7.3", ] -[[package]] -name = "typeable" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1410f6f91f21d1612654e7cc69193b0334f909dcf2c790c4826254fbb86f8887" - [[package]] name = "typenum" version = "1.11.2" @@ -8539,22 +8848,13 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "unicase" -version = "1.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f4765f83163b74f957c797ad9253caf97f103fb064d3999aea9568d09fc8a33" -dependencies = [ - "version_check 0.1.5", -] - [[package]] name = "unicase" version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "50f37be617794602aabbeee0be4f259dc1778fabe05e2d67ee8f79326d5cb4f6" dependencies 
= [ - "version_check 0.9.1", + "version_check", ] [[package]] @@ -8587,6 +8887,12 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" +[[package]] +name = "unicode-xid" +version = "0.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c1f860d7d29cf02cb2f3f359fd35991af3d30bac52c57d265a3c461074cb4dc" + [[package]] name = "unicode-xid" version = "0.2.0" @@ -8595,12 +8901,13 @@ checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" [[package]] name = "unsigned-varint" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b7ffb36714206d2f5f05d61a2bc350415c642f2c54433f0ebf829afbe41d570" +checksum = "f38e01ad4b98f042e166c1bf9a13f9873a99d79eaa171ce7ca81e6dd0f895d8a" dependencies = [ "bytes 0.5.4", - "futures 0.3.4", + "futures-io", + "futures-util", "futures_codec", ] @@ -8650,23 +8957,6 @@ version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a" -[[package]] -name = "vergen" -version = "3.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6aba5e34f93dc7051dfad05b98a18e9156f27e7b431fe1d2398cb6061c0a1dba" -dependencies = [ - "bitflags", - "chrono", - "failure", -] - -[[package]] -name = "version_check" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "914b1a6776c4c929a602fafd8bc742e06365d4bcbe48c30f9cca5824f70dc9dd" - [[package]] name = "version_check" version = "0.9.1" @@ -8693,15 +8983,24 @@ dependencies = [ [[package]] name = "wabt-sys" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af5d153dc96aad7dc13ab90835b892c69867948112d95299e522d370c4e13a08" +checksum = "23d7043ebb3e5d96fad7a8d3ca22ee9880748ff8c3e18092cfb2a49d3b8f9084" dependencies = [ "cc", "cmake", "glob 0.2.11", ] +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.3.1" @@ -8720,7 +9019,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230" dependencies = [ "futures 0.1.29", - "log 0.4.8", + "log", "try-lock", ] @@ -8730,7 +9029,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1ce8a968cb1cd110d136ff8b819a556d6fb6d919363c61534f6860c7eb172ba0" dependencies = [ - "log 0.4.8", + "log", "try-lock", ] @@ -8742,9 +9041,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasm-bindgen" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5205e9afdf42282b192e2310a5b463a6d1c1d774e30dc3c791ac37ab42d2616c" +checksum = "2cc57ce05287f8376e998cbddfb4c8cb43b84a7ec55cf4551d7c00eef317a47f" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -8752,24 +9051,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11cdb95816290b525b32587d76419facd99662a07e59d3cdb560488a819d9a45" 
+checksum = "d967d37bf6c16cca2973ca3af071d0a2523392e4a594548155d89a678f4237cd" dependencies = [ "bumpalo", "lazy_static", - "log 0.4.8", + "log", "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.8" +version = "0.4.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bbdd49e3e28b40dec6a9ba8d17798245ce32b019513a845369c641b275135d9" +checksum = "7add542ea1ac7fdaa9dc25e031a6af33b7d63376292bd24140c637d00d1c312a" dependencies = [ "cfg-if", "js-sys", @@ -8779,48 +9078,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "574094772ce6921576fb6f2e3f7497b8a76273b6db092be18fc48a082de09dc3" +checksum = "8bd151b63e1ea881bb742cd20e1d6127cef28399558f3b5d415289bc41eee3a4" dependencies = [ - "quote", + "quote 1.0.3", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e85031354f25eaebe78bb7db1c3d86140312a911a106b2e29f9cc440ce3e7668" +checksum = "d68a5b36eef1be7868f668632863292e37739656a80fc4b9acec7b0bd35a4931" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.58" +version = "0.2.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5e7e61fc929f4c0dddb748b102ebf9f632e2b8d739f2016542b4de2965a9601" - -[[package]] -name = "wasm-bindgen-webidl" -version = "0.2.58" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef012a0d93fc0432df126a8eaf547b2dce25a8ce9212e1d3cbeef5c11157975d" -dependencies = [ - "anyhow", - "heck", - "log 0.4.8", - "proc-macro2", - "quote", - "syn", - "wasm-bindgen-backend", - "weedle", -] +checksum = "daf76fe7d25ac79748a37538b7daeed1c7a6867c92d3245c12c6222e4a20d639" [[package]] name = "wasm-gc-api" @@ -8828,7 +9111,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0c32691b6c7e6c14e7f8fd55361a9088b507aa49620fcd06c09b3a1082186b9" dependencies = [ - "log 0.4.8", + "log", "parity-wasm 0.32.0", "rustc-demangle", ] @@ -8859,7 +9142,7 @@ dependencies = [ "libc", "memory_units", "num-rational", - "num-traits", + "num-traits 0.2.11", "parity-wasm 0.41.0", "wasmi-validation", ] @@ -8879,32 +9162,11 @@ version = "0.51.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aeb1956b19469d1c5e63e459d29e7b5aa0f558d9f16fcef09736f8a265e6c10a" -[[package]] -name = "wasmtime" -version = "0.12.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" -dependencies = [ - "anyhow", - "backtrace", - "cfg-if", - "lazy_static", - "libc", - "region", - "rustc-demangle", - "target-lexicon", - "wasmparser", - "wasmtime-environ", - "wasmtime-jit", - "wasmtime-profiling", - "wasmtime-runtime", - "wat", - "winapi 0.3.8", -] - [[package]] name = "wasmtime-debug" version = "0.12.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d3d007436043bf55ec252d2f4dc1d35834157b5e2f148da839ca502e611cfe1" dependencies = [ "anyhow", "faerie", @@ -8919,7 
+9181,8 @@ dependencies = [ [[package]] name = "wasmtime-environ" version = "0.12.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "80f3dea0e60c076dd0da27fa10c821323903c9554c617ed32eaab8e7a7e36c89" dependencies = [ "anyhow", "base64 0.11.0", @@ -8932,7 +9195,7 @@ dependencies = [ "file-per-thread-logger", "indexmap", "libc", - "log 0.4.8", + "log", "more-asserts", "rayon", "serde", @@ -8944,34 +9207,11 @@ dependencies = [ "zstd", ] -[[package]] -name = "wasmtime-jit" -version = "0.12.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" -dependencies = [ - "anyhow", - "cfg-if", - "cranelift-codegen", - "cranelift-entity", - "cranelift-frontend", - "cranelift-native", - "cranelift-wasm", - "more-asserts", - "region", - "target-lexicon", - "thiserror", - "wasmparser", - "wasmtime-debug", - "wasmtime-environ", - "wasmtime-profiling", - "wasmtime-runtime", - "winapi 0.3.8", -] - [[package]] name = "wasmtime-profiling" version = "0.12.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "984d29c8add3381e60d649f4e3e2a501da900fc2d2586e139502eec32fe0ebc8" dependencies = [ "gimli", "goblin", @@ -8983,55 +9223,32 @@ dependencies = [ "target-lexicon", ] -[[package]] -name = "wasmtime-runtime" -version = "0.12.0" -source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" -dependencies = [ - "backtrace", - "cc", - "cfg-if", - "indexmap", - "lazy_static", - "libc", - "memoffset", - "more-asserts", - "region", - "thiserror", - "wasmtime-environ", - "wasmtime-profiling", - "winapi 0.3.8", -] - [[package]] name = "wast" -version = "9.0.0" +version = "13.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee7b16105405ca2aa2376ba522d8d4b1a11604941dd3bb7df9fd2ece60f8d16a" +checksum = "5b20abd8b4a26f7e0d4dd5e357e90a3d555ec190e94472c9b2b27c5b9777f9ae" dependencies = [ "leb128", ] [[package]] name = "wat" -version = "1.0.10" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56173f7f4fb59aebe35a7e71423845e1c6c7144bfb56362d497931b6b3bed0f6" +checksum = "51a615830ee3e7200b505c441fec09aac2f114deae69df52f215cb828ba112c4" dependencies = [ "wast", ] [[package]] name = "web-sys" -version = "0.3.35" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aaf97caf6aa8c2b1dac90faf0db529d9d63c93846cca4911856f78a83cebf53b" +checksum = "2d6f51648d8c56c366144378a33290049eafdd784071077f6fe37dae64c1c4cb" dependencies = [ - "anyhow", "js-sys", - "sourcefile", "wasm-bindgen", - "wasm-bindgen-webidl", ] [[package]] @@ -9062,61 +9279,11 @@ dependencies = [ "webpki", ] -[[package]] -name = "websocket" -version = "0.24.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413b37840b9e27b340ce91b319ede10731de8c72f5bc4cb0206ec1ca4ce581d0" -dependencies = [ - "bytes 0.4.12", - "futures 0.1.29", - "hyper 0.10.16", - "native-tls", - "rand 0.6.5", - "tokio-codec", - "tokio-io", - "tokio-reactor", - "tokio-tcp", - "tokio-tls", - "unicase 1.4.2", - "url 1.7.2", - "websocket-base", -] - -[[package]] -name = "websocket-base" -version = "0.24.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e3810f0d00c4dccb54c30a4eee815e703232819dec7b007db115791c42aa374" -dependencies = [ - "base64 0.10.1", - "bitflags", - "byteorder 1.3.4", - "bytes 0.4.12", - "futures 0.1.29", - "native-tls", - "rand 0.6.5", - "sha1", - "tokio-codec", - "tokio-io", - "tokio-tcp", - "tokio-tls", -] - -[[package]] -name = "weedle" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bb43f70885151e629e2a19ce9e50bd730fd436cfd4b666894c9ce4de9141164" -dependencies = [ - "nom", -] - [[package]] name = "which" -version = "3.1.0" +version = "3.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5475d47078209a02e60614f7ba5e645ef3ed60f771920ac1906d7c1cc65024c8" +checksum = "d011071ae14a2f6671d0b74080ae0cd8ebf3a6f8c9589a2cd45f23126fe29724" dependencies = [ "libc", ] @@ -9151,9 +9318,9 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ccfbf554c6ad11084fb7517daca16cfdcaccbdadba4fc336f032a8b12c2ad80" +checksum = "fa515c5163a99cc82bab70fd3bfdd36d827be85de63737b40fcef2ce084a436e" dependencies = [ "winapi 0.3.8", ] @@ -9173,7 +9340,7 @@ dependencies = [ "byteorder 1.3.4", "bytes 0.4.12", "httparse", - "log 0.4.8", + "log", "mio", "mio-extras", "rand 0.7.3", @@ -9198,9 +9365,9 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "637ff90c9540fa3073bb577e65033069e4bae7c79d49d74aa3ffdf5342a53217" dependencies = [ - "curve25519-dalek 2.0.0", + "curve25519-dalek", "rand_core 0.5.1", - "zeroize 1.1.0", + "zeroize", ] [[package]] @@ -9211,25 +9378,18 @@ checksum = "d089681aa106a86fade1b0128fb5daf07d5867a509ab036d99988dec80429a57" [[package]] name = "yamux" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f03098897b734bd943ab23f6aa9f98aafd72a88516deedd66f9d564c57bf2f19" +checksum = "84300bb493cc878f3638b981c62b4632ec1a5c52daaa3036651e8c106d3b55ea" dependencies = [ - "bytes 0.5.4", "futures 0.3.4", - "log 0.4.8", + "log", "nohash-hasher", "parking_lot 0.10.0", "rand 0.7.3", - "thiserror", + "static_assertions", ] -[[package]] -name = "zeroize" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45af6a010d13e4cf5b54c94ba5a2b2eba5596b9e46bf5875612d332a1f2b3f86" - [[package]] name = "zeroize" version = "1.1.0" @@ -9246,8 +9406,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ "proc-macro2", - "quote", - "syn", + "quote 1.0.3", + "syn 1.0.17", "synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index 856124dae6dbee5dfece8918182ce3124851ab94..ab19142da6ee664e636dd2c39de6317bbe16c5a0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ members = [ "bin/node-template/node", "bin/node-template/runtime", "bin/node-template/pallets/template", + "bin/node/bench", "bin/node/cli", "bin/node/executor", "bin/node/primitives", @@ -86,8 +87,10 @@ members = [ "frame/offences", "frame/randomness-collective-flip", "frame/recovery", + "frame/scheduler", "frame/scored-pool", "frame/session", + "frame/session/benchmarking", "frame/society", "frame/staking", "frame/staking/reward-curve", @@ -117,7 +120,9 @@ members = [ "primitives/consensus/babe", 
"primitives/consensus/common", "primitives/consensus/pow", + "primitives/consensus/vrf", "primitives/core", + "primitives/chain-spec", "primitives/debug-derive", "primitives/storage", "primitives/externalities", @@ -128,10 +133,12 @@ members = [ "primitives/offchain", "primitives/panic-handler", "primitives/phragmen", + "primitives/phragmen/compact", "primitives/rpc", "primitives/runtime-interface", "primitives/runtime-interface/proc-macro", "primitives/runtime-interface/test-wasm", + "primitives/runtime-interface/test-wasm-deprecated", "primitives/runtime-interface/test", "primitives/serializer", "primitives/session", @@ -139,6 +146,7 @@ members = [ "primitives/api/proc-macro", "primitives/api/test", "primitives/arithmetic", + "primitives/arithmetic/fuzzer", "primitives/io", "primitives/runtime", "primitives/sandbox", @@ -150,6 +158,7 @@ members = [ "primitives/test-primitives", "primitives/transaction-pool", "primitives/trie", + "primitives/utils", "primitives/wasm-interface", "test-utils/client", "test-utils/runtime", diff --git a/bin/node-template/README.md b/bin/node-template/README.md index 4ae60478fc72b77f32664b4225c023c36ff5f1af..c1730d51e5c137e6a02ebee623f6a5fba25a2630 100644 --- a/bin/node-template/README.md +++ b/bin/node-template/README.md @@ -56,7 +56,7 @@ cargo run -- \ --chain=local \ --alice \ --node-key 0000000000000000000000000000000000000000000000000000000000000001 \ - --telemetry-url ws://telemetry.polkadot.io:1024 \ + --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ --validator ``` @@ -69,7 +69,7 @@ cargo run -- \ --chain=local \ --bob \ --port 30334 \ - --telemetry-url ws://telemetry.polkadot.io:1024 \ + --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ --validator ``` diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 1e8c3fad2e3c0e2f1a8cef0f2d7fff57f2b45ff3..a2ccc4fa95732b35da44cfa73e4165514ba2e4ea 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,7 +1,8 @@ [package] name = "node-template" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Anonymous"] +description = "Substrate Node template" edition = "2018" license = "Unlicense" build = "build.rs" @@ -12,30 +13,32 @@ repository = "https://github.com/paritytech/substrate/" name = "node-template" [dependencies] -futures = "0.3.1" +futures = "0.3.4" log = "0.4.8" structopt = "0.3.8" -sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor" } -sc-service = { version = "0.8.0-alpha.2", path = "../../../client/service" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } -sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } -sc-network = { version = "0.8.0-alpha.2", path = "../../../client/network" } -sc-consensus-aura = { version = "0.8.0-alpha.2", path = "../../../client/consensus/aura" } -sp-consensus-aura = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/aura" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } -grandpa = { version = "0.8.0-alpha.2", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -grandpa-primitives = { version = "2.0.0-alpha.2", package = "sp-finality-grandpa", path = 
"../../../primitives/finality-grandpa" } -sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sc-basic-authorship = { path = "../../../client/basic-authorship" , version = "0.8.0-alpha.2"} +sc-cli = { version = "0.8.0-alpha.5", path = "../../../client/cli" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sc-executor = { version = "0.8.0-alpha.5", path = "../../../client/executor" } +sc-service = { version = "0.8.0-alpha.5", path = "../../../client/service" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../../primitives/inherents" } +sc-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../primitives/transaction-pool" } +sc-network = { version = "0.8.0-alpha.5", path = "../../../client/network" } +sc-consensus-aura = { version = "0.8.0-alpha.5", path = "../../../client/consensus/aura" } +sp-consensus-aura = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/aura" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } +sc-finality-grandpa = { version = "0.8.0-alpha.5", path = "../../../client/finality-grandpa" } +sp-finality-grandpa = { version = "2.0.0-alpha.5", path = "../../../primitives/finality-grandpa" } +sc-client = { version = "0.8.0-alpha.5", path = "../../../client/" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../../client/api" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sc-basic-authorship = { path = "../../../client/basic-authorship", version = "0.8.0-alpha.5"} -node-template-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } +node-template-runtime = { version = "2.0.0-alpha.5", path = "../runtime" } [build-dependencies] -vergen = "3.0.4" -build-script-utils = { version = "2.0.0-alpha.2", package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } +substrate-build-script-utils = { version = "2.0.0-alpha.5", path = "../../../utils/build-script-utils" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node-template/node/build.rs b/bin/node-template/node/build.rs index 222cbb409285b40e7204cd609d444854dd4082aa..e3bfe3116bf28dba1872f7d0b64c2ee0c9c71c3c 100644 --- a/bin/node-template/node/build.rs +++ b/bin/node-template/node/build.rs @@ -1,9 +1,7 @@ -use vergen::{ConstantsFlags, generate_cargo_keys}; - -const ERROR_MSG: &str = "Failed to generate metadata files"; +use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; fn main() { - generate_cargo_keys(ConstantsFlags::SHA_SHORT).expect(ERROR_MSG); + generate_cargo_keys(); - build_script_utils::rerun_if_git_head_changed(); + rerun_if_git_head_changed(); } diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index f1a7e29d44cb44e297bbb65591b8ed8396a7c37d..fb53edd9a1a06b44fd2626feda427b79b0c08a5f 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -3,10 +3,10 @@ use node_template_runtime::{ AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, SudoConfig, SystemConfig, WASM_BINARY, Signature }; -use sp_consensus_aura::sr25519::{AuthorityId as AuraId}; -use grandpa_primitives::{AuthorityId as 
GrandpaId}; -use sc_service; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_finality_grandpa::AuthorityId as GrandpaId; use sp_runtime::traits::{Verify, IdentifyAccount}; +use sc_service::ChainType; // Note this is the URL for the telemetry server //const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; @@ -14,17 +14,6 @@ use sp_runtime::traits::{Verify, IdentifyAccount}; /// Specialized `ChainSpec`. This is a specialization of the general Substrate ChainSpec type. pub type ChainSpec = sc_service::GenericChainSpec; -/// The chain specification option. This is expected to come in from the CLI and -/// is little more than one of a number of alternatives which can easily be converted -/// from a string (`--chain=...`) into a `ChainSpec`. -#[derive(Clone, Debug)] -pub enum Alternative { - /// Whatever the current runtime is, with just Alice as an auth. - Development, - /// Whatever the current runtime is, with simple Alice/Bob auths. - LocalTestnet, -} - /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { TPublic::Pair::from_string(&format!("//{}", seed), None) @@ -42,80 +31,72 @@ pub fn get_account_id_from_seed(seed: &str) -> AccountId where } /// Helper function to generate an authority key for Aura -pub fn get_authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { +pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { ( get_from_seed::(s), get_from_seed::(s), ) } -impl Alternative { - /// Get an actual chain config from one of the alternatives. - pub(crate) fn load(self) -> Result { - Ok(match self { - Alternative::Development => ChainSpec::from_genesis( - "Development", - "dev", - || testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - ], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - true, - ), - vec![], - None, - None, - None, - None - ), - Alternative::LocalTestnet => ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - || testnet_genesis( - vec![ - get_authority_keys_from_seed("Alice"), - get_authority_keys_from_seed("Bob"), - ], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - true, - ), - vec![], - None, - None, - None, - None - ), - }) - } +pub fn development_config() -> ChainSpec { + ChainSpec::from_genesis( + "Development", + "dev", + ChainType::Development, + || testnet_genesis( + vec![ + authority_keys_from_seed("Alice"), + ], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + true, + ), + vec![], + None, + None, + None, + None, + ) +} - pub(crate) fn from(s: &str) -> Option { - match s { - "dev" => Some(Alternative::Development), - "" | "local" => Some(Alternative::LocalTestnet), - _ => None, - } - } +pub fn 
local_testnet_config() -> ChainSpec { + ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + ChainType::Local, + || testnet_genesis( + vec![ + authority_keys_from_seed("Alice"), + authority_keys_from_seed("Bob"), + ], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + true, + ), + vec![], + None, + None, + None, + None, + ) } fn testnet_genesis(initial_authorities: Vec<(AuraId, GrandpaId)>, @@ -141,10 +122,3 @@ fn testnet_genesis(initial_authorities: Vec<(AuraId, GrandpaId)>, }), } } - -pub fn load_spec(id: &str) -> Result, String> { - Ok(match Alternative::from(id) { - Some(spec) => Box::new(spec.load()?), - None => Box::new(ChainSpec::from_json_file(std::path::PathBuf::from(id))?), - }) -} diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 0f4c301dbff5b7defe0565bf71e85b21a6418867..75b88877aadfbe731dd16806e334948678fb24be 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -14,36 +14,64 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use sc_cli::VersionInfo; -use crate::service; use crate::chain_spec; use crate::cli::Cli; +use crate::service; +use sc_cli::SubstrateCli; +use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; -/// Parse and run command line arguments -pub fn run(version: VersionInfo) -> sc_cli::Result<()> { - let opt = sc_cli::from_args::(&version); +impl SubstrateCli for Cli { + fn impl_name() -> &'static str { + "Substrate Node" + } + + fn impl_version() -> &'static str { + env!("SUBSTRATE_CLI_IMPL_VERSION") + } + + fn description() -> &'static str { + env!("CARGO_PKG_DESCRIPTION") + } + + fn author() -> &'static str { + env!("CARGO_PKG_AUTHORS") + } - let mut config = sc_service::Configuration::from_version(&version); + fn support_url() -> &'static str { + "support.anonymous.an" + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn executable_name() -> &'static str { + env!("CARGO_PKG_NAME") + } + + fn load_spec(&self, id: &str) -> Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()), + "" | "local" => Box::new(chain_spec::local_testnet_config()), + path => Box::new(chain_spec::ChainSpec::from_json_file( + std::path::PathBuf::from(path), + )?), + }) + } +} + +/// Parse and run command line arguments +pub fn run() -> sc_cli::Result<()> { + let cli = Cli::from_args(); - match opt.subcommand { + match &cli.subcommand { Some(subcommand) => { - subcommand.init(&version)?; - subcommand.update_config(&mut config, chain_spec::load_spec, &version)?; - subcommand.run( - config, - |config: _| Ok(new_full_start!(config).0), - ) - }, + let runner = cli.create_runner(subcommand)?; + runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) + } None => { - opt.run.init(&version)?; - opt.run.update_config(&mut config, chain_spec::load_spec, &version)?; - opt.run.run( - config, - service::new_light, - 
service::new_full, - &version, - ) - }, + let runner = cli.create_runner(&cli.run)?; + runner.run_node(service::new_light, service::new_full) + } } } diff --git a/bin/node-template/node/src/main.rs b/bin/node-template/node/src/main.rs index 91b2c257e0cd733f7e87d00e02857101810a2fb0..369e6932a030811b542ae8de9f26e9324f22e069 100644 --- a/bin/node-template/node/src/main.rs +++ b/bin/node-template/node/src/main.rs @@ -8,16 +8,5 @@ mod cli; mod command; fn main() -> sc_cli::Result<()> { - let version = sc_cli::VersionInfo { - name: "Substrate Node", - commit: env!("VERGEN_SHA_SHORT"), - version: env!("CARGO_PKG_VERSION"), - executable_name: "node-template", - author: "Anonymous", - description: "Template Node", - support_url: "support.anonymous.an", - copyright_start_year: 2017, - }; - - command::run(version) + command::run() } diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 4b66f90fbb3732bbc07e59ced32a40717bd1b559..7c4a574f6be04c823e182671b1b7930bfdb5394d 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -10,7 +10,7 @@ use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider}; +use sc_finality_grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider}; // Our native executor instance. native_executor_instance!( @@ -44,7 +44,7 @@ macro_rules! new_full_start { .ok_or_else(|| sc_service::Error::SelectChainRequired)?; let (grandpa_block_import, grandpa_link) = - grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; + sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( grandpa_block_import.clone(), client.clone(), @@ -72,16 +72,11 @@ macro_rules! new_full_start { pub fn new_full(config: Configuration) -> Result { - let is_authority = config.roles.is_authority(); + let role = config.role.clone(); let force_authoring = config.force_authoring; - let name = config.name.clone(); + let name = config.network.node_name.clone(); let disable_grandpa = config.disable_grandpa; - // sentry nodes announce themselves as authorities to the network - // and should run the same protocols authorities do, but it should - // never actively participate in any consensus process. - let participates_in_consensus = is_authority && !config.sentry_mode; - let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config); let (block_import, grandpa_link) = @@ -96,11 +91,9 @@ pub fn new_full(config: Configuration) })? .build()?; - if participates_in_consensus { - let proposer = sc_basic_authorship::ProposerFactory::new( - service.client(), - service.transaction_pool() - ); + if role.is_authority() { + let proposer = + sc_basic_authorship::ProposerFactory::new(service.client(), service.transaction_pool()); let client = service.client(); let select_chain = service.select_chain() @@ -129,20 +122,20 @@ pub fn new_full(config: Configuration) // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. 
- let keystore = if participates_in_consensus { + let keystore = if role.is_authority() { Some(service.keystore()) } else { None }; - let grandpa_config = grandpa::Config { + let grandpa_config = sc_finality_grandpa::Config { // FIXME #1578 make this available through chainspec gossip_duration: Duration::from_millis(333), justification_period: 512, name: Some(name), observer_enabled: false, keystore, - is_authority, + is_authority: role.is_network_authority(), }; let enable_grandpa = !disable_grandpa; @@ -153,13 +146,13 @@ pub fn new_full(config: Configuration) // and vote data availability than the observer. The observer has not // been tested extensively yet and having most nodes in a network run it // could lead to finality stalls. - let grandpa_config = grandpa::GrandpaParams { + let grandpa_config = sc_finality_grandpa::GrandpaParams { config: grandpa_config, link: grandpa_link, network: service.network(), inherent_data_providers: inherent_data_providers.clone(), telemetry_on_connect: Some(service.telemetry_on_connect_stream()), - voting_rule: grandpa::VotingRulesBuilder::default().build(), + voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), prometheus_registry: service.prometheus_registry() }; @@ -167,10 +160,10 @@ pub fn new_full(config: Configuration) // if it fails we take down the service with it. service.spawn_essential_task( "grandpa-voter", - grandpa::run_grandpa_voter(grandpa_config)? + sc_finality_grandpa::run_grandpa_voter(grandpa_config)? ); } else { - grandpa::setup_disabled_grandpa( + sc_finality_grandpa::setup_disabled_grandpa( service.client(), &inherent_data_providers, service.network(), @@ -204,7 +197,7 @@ pub fn new_light(config: Configuration) let fetch_checker = fetcher .map(|fetcher| fetcher.checker().clone()) .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = grandpa::light_block_import( + let grandpa_block_import = sc_finality_grandpa::light_block_import( client.clone(), backend, &(client.clone() as Arc<_>), diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index b39fcc1dae4d5bdb3b71edfaac02c55783879e29..69fcd843526806c9fe9a0b6e684a292fcbfe778b 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -2,39 +2,37 @@ authors = ['Anonymous'] edition = '2018' name = 'pallet-template' -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" license = "Unlicense" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet template" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -safe-mix = { default-features = false, version = '1.0.0' } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } [dependencies.frame-support] default-features = false -version = "2.0.0-alpha.2" +version = "2.0.0-alpha.5" path = "../../../../frame/support" -[dependencies.system] +[dependencies.frame-system] default-features = false -package = 'frame-system' -version = "2.0.0-alpha.2" +version = "2.0.0-alpha.5" path = "../../../../frame/system" [dev-dependencies.sp-core] default-features = false -version = "2.0.0-alpha.2" +version = "2.0.0-alpha.5" path = "../../../../primitives/core" [dev-dependencies.sp-io] default-features = false -version = "2.0.0-alpha.2" +version = "2.0.0-alpha.5" path = 
"../../../../primitives/io" [dev-dependencies.sp-runtime] default-features = false -version = "2.0.0-alpha.2" +version = "2.0.0-alpha.5" path = "../../../../primitives/runtime" @@ -43,6 +41,8 @@ default = ['std'] std = [ 'codec/std', 'frame-support/std', - 'safe-mix/std', - 'system/std' + 'frame-system/std' ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index 892778adeb46b431cb46e87dd4e7ac4c1cea129b..a0daecfb72c9a14ffa7e7819400f0345c542b5b8 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -10,7 +10,7 @@ /// https://github.com/paritytech/substrate/blob/master/frame/example/src/lib.rs use frame_support::{decl_module, decl_storage, decl_event, decl_error, dispatch}; -use system::ensure_signed; +use frame_system::{self as system, ensure_signed}; #[cfg(test)] mod mock; @@ -75,6 +75,7 @@ decl_module! { /// Just a dummy entry point. /// function that can be called by the external world as an extrinsics call /// takes a parameter of the type `AccountId`, stores it, and emits an event + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn do_something(origin, something: u32) -> dispatch::DispatchResult { // Check it was signed and get the signer. See also: ensure_root and ensure_none let who = ensure_signed(origin)?; @@ -90,6 +91,7 @@ decl_module! { /// Another dummy entry point. /// takes no parameters, attempts to increment storage value, and possibly throws an error + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn cause_error(origin) -> dispatch::DispatchResult { // Check it was signed and get the signer. See also: ensure_root and ensure_none let _who = ensure_signed(origin)?; diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index 2ea81ffb456261122b234c6ae3c36f52eaa9c430..a93ac0359e3a2f2b8f4e0e558a0b254e33eb1c7e 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -6,6 +6,7 @@ use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; use sp_runtime::{ traits::{BlakeTwo256, IdentityLookup}, testing::Header, Perbill, }; +use frame_system as system; impl_outer_origin! 
{ pub enum Origin for Test {} diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index 9268dd8c05035f4dd8022702e9792f421ab556c5..e8653e6df70e82e503a54e79c638e9a796e41913 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-template-runtime" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Anonymous"] edition = "2018" license = "Unlicense" @@ -8,33 +8,33 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } -aura = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } -balances = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/support" } -grandpa = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } -randomness-collective-flip = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } -sudo = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } -system = { version = "2.0.0-alpha.2", default-features = false, package = "frame-system", path = "../../../frame/system" } -timestamp = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } -transaction-payment = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } -frame-executive = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/executive" } +aura = { version = "2.0.0-alpha.5", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } +balances = { version = "2.0.0-alpha.5", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/support" } +grandpa = { version = "2.0.0-alpha.5", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } +randomness-collective-flip = { version = "2.0.0-alpha.5", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } +sudo = { version = "2.0.0-alpha.5", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } +system = { version = "2.0.0-alpha.5", default-features = false, package = "frame-system", path = "../../../frame/system" } +timestamp = { version = "2.0.0-alpha.5", default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } +transaction-payment = { version = "2.0.0-alpha.5", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } +frame-executive = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/executive" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version 
= "2.0.0-alpha.2", default-features = false, path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-alpha.2"} -sp-consensus-aura = { version = "0.8.0-alpha.2", default-features = false, path = "../../../primitives/consensus/aura" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0-alpha.2"} -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/io" } -sp-offchain = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } -sp-session = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/std" } -sp-transaction-pool = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/version" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/api" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-alpha.5"} +sp-consensus-aura = { version = "0.8.0-alpha.5", default-features = false, path = "../../../primitives/consensus/aura" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/core" } +sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0-alpha.5"} +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/io" } +sp-offchain = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/version" } -template = { version = "2.0.0-alpha.2", default-features = false, path = "../pallets/template", package = "pallet-template" } +template = { version = "2.0.0-alpha.5", default-features = false, path = "../pallets/template", package = "pallet-template" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } @@ -68,3 +68,6 @@ std = [ "transaction-payment/std", "template/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 0414759a5ad86f5c5be4adb1cc5031d4adf1b4e7..94f033fd8f58ee231f412e7a2cf6f7f53f77d49a 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -11,8 +11,8 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); use sp_std::prelude::*; use sp_core::OpaqueMetadata; use 
sp_runtime::{ - ApplyExtrinsicResult, transaction_validity::TransactionValidity, generic, create_runtime_str, - impl_opaque_keys, MultiSignature, + ApplyExtrinsicResult, generic, create_runtime_str, impl_opaque_keys, MultiSignature, + transaction_validity::{TransactionValidity, TransactionSource}, }; use sp_runtime::traits::{ BlakeTwo256, Block as BlockT, IdentityLookup, Verify, ConvertInto, IdentifyAccount @@ -318,8 +318,11 @@ impl_runtime_apis! { } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity { - Executive::validate_transaction(tx) + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx) } } diff --git a/bin/node/bench/Cargo.toml b/bin/node/bench/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..22e7fe51d848e1f44ca0c34eddb57d39ca0d0c32 --- /dev/null +++ b/bin/node/bench/Cargo.toml @@ -0,0 +1,20 @@ +[package] +name = "node-bench" +version = "0.8.0-alpha.5" +authors = ["Parity Technologies "] +description = "Substrate node integration benchmarks." +edition = "2018" +license = "GPL-3.0" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] +log = "0.4.8" +node-primitives = { version = "2.0.0-alpha.5", path = "../primitives" } +node-testing = { version = "2.0.0-alpha.5", path = "../testing" } +sc-cli = { version = "0.8.0-alpha.5", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../../client/api/" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +serde = "1.0.101" +serde_json = "1.0.41" +structopt = "0.3" \ No newline at end of file diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs new file mode 100644 index 0000000000000000000000000000000000000000..a8164db75a53833855ad996cd1aceaa24e232123 --- /dev/null +++ b/bin/node/bench/src/core.rs @@ -0,0 +1,140 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +use std::{fmt, borrow::{Cow, ToOwned}}; +use serde::Serialize; + +pub struct Path(Vec); + +impl Path { + pub fn new(initial: &'static [&'static str]) -> Self { + Path(initial.iter().map(|x| x.to_string()).collect()) + } +} + +impl Path { + pub fn push(&mut self, item: &str) { + self.0.push(item.to_string()); + } + + pub fn full(&self) -> String { + self.0.iter().fold(String::new(), |mut val, next| { val.push_str("::"); val.push_str(next); val }) + } + + pub fn has(&self, path: &str) -> bool { + self.full().contains(path) + } +} + +pub trait BenchmarkDescription { + fn path(&self) -> Path; + + fn setup(self: Box) -> Box; + + fn name(&self) -> Cow<'static, str>; +} + +pub trait Benchmark { + fn run(&mut self) -> std::time::Duration; +} + +#[derive(Debug, Clone, Serialize)] +pub struct BenchmarkOutput { + name: String, + raw_average: u64, + average: u64, +} + +struct NsFormatter(u64); + +impl fmt::Display for NsFormatter { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let v = self.0; + + if v < 100 { + return write!(f, "{} ns", v) + } + + if self.0 < 10_000 { + return write!(f, "{:.1} µs", v as f64 / 1000.0) + } + + if self.0 < 1_000_000 { + return write!(f, "{:.1} ms", v as f64 / 1_000_000.0) + } + + if self.0 < 100_000_000 { + return write!(f, "{} ms", v as f64 / 1_000_000.0) + } + + write!(f, "{:.2} s", v as f64 / 1_000_000_000.0) + } +} + +impl fmt::Display for BenchmarkOutput { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "({}: avg {}, w_avg {})", + self.name, + NsFormatter(self.raw_average), + NsFormatter(self.average), + ) + } +} + +pub fn run_benchmark(benchmark: Box) -> BenchmarkOutput { + let name = benchmark.name().to_owned(); + let mut benchmark = benchmark.setup(); + + let mut durations: Vec = vec![]; + for _ in 0..50 { + let duration = benchmark.run(); + durations.push(duration.as_nanos()); + } + + durations.sort(); + + let raw_average = (durations.iter().sum::() / (durations.len() as u128)) as u64; + let average = (durations.iter().skip(10).take(30).sum::() / 30) as u64; + + BenchmarkOutput { + name: name.into(), + raw_average, + average, + } +} + +macro_rules! matrix( + ( $var:ident in $over:expr => $tt:expr, $( $rest:tt )* ) => { + { + let mut res = Vec::>::new(); + for $var in $over.iter() { + res.push(Box::new($tt)); + } + res.extend(matrix!( $($rest)* )); + res + } + }; + ( $var:expr, $( $rest:tt )*) => { + { + let mut res = vec![Box::new($var) as Box]; + res.extend(matrix!( $($rest)* )); + res + } + }; + () => { vec![] } +); \ No newline at end of file diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs new file mode 100644 index 0000000000000000000000000000000000000000..20181bf4c7f7de816226ab76701d2c9339d56fa8 --- /dev/null +++ b/bin/node/bench/src/import.rs @@ -0,0 +1,136 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Block import benchmark. +//! +//! 
This benchmark is expected to measure block import operation of +//! some more or less full block. +//! +//! As we also want to protect against cold-cache attacks, this +//! benchmark should not rely on any caching (except those that +//! DO NOT depend on user input). Thus block generation should be +//! based on randomized operation. +//! +//! This is supposed to be very simple benchmark and is not subject +//! to much configuring - just block full of randomized transactions. +//! It is not supposed to measure runtime modules weight correctness + +use std::borrow::Cow; + +use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes}; +use node_primitives::Block; +use sc_client_api::backend::Backend; +use sp_runtime::generic::BlockId; + +use crate::core::{self, Path}; + +#[derive(Clone, Copy, Debug)] +pub enum SizeType { Small, Medium, Large } + +impl SizeType { + fn transactions(&self) -> usize { + match self { + SizeType::Small => 10, + SizeType::Medium => 100, + SizeType::Large => 500, + } + } +} + +pub struct ImportBenchmarkDescription { + pub profile: Profile, + pub key_types: KeyTypes, + pub size: SizeType, +} + +pub struct ImportBenchmark { + profile: Profile, + database: BenchDb, + block: Block, +} + +impl core::BenchmarkDescription for ImportBenchmarkDescription { + fn path(&self) -> Path { + + let mut path = Path::new(&["node", "import"]); + + match self.profile { + Profile::Wasm => path.push("wasm"), + Profile::Native => path.push("native"), + } + + match self.key_types { + KeyTypes::Sr25519 => path.push("sr25519"), + KeyTypes::Ed25519 => path.push("ed25519"), + } + + match self.size { + SizeType::Small => path.push("small"), + SizeType::Medium => path.push("medium"), + SizeType::Large => path.push("large"), + } + + path + } + + fn setup(self: Box) -> Box { + let profile = self.profile; + let mut bench_db = BenchDb::with_key_types(self.size.transactions(), self.key_types); + let block = bench_db.generate_block(BlockType::RandomTransfers(self.size.transactions())); + Box::new(ImportBenchmark { + database: bench_db, + block, + profile, + }) + } + + fn name(&self) -> Cow<'static, str> { + match self.profile { + Profile::Wasm => "Import benchmark (random transfers, wasm)".into(), + Profile::Native => "Import benchmark (random transfers, native)".into(), + } + } +} + +impl core::Benchmark for ImportBenchmark { + fn run(&mut self) -> std::time::Duration { + let mut context = self.database.create_context(self.profile); + + let _ = context.client.runtime_version_at(&BlockId::Number(0)) + .expect("Failed to get runtime version") + .spec_version; + + let start = std::time::Instant::now(); + context.import_block(self.block.clone()); + let elapsed = start.elapsed(); + + log::info!( + target: "bench-logistics", + "imported block with {} tx, took: {:#?}", + self.block.extrinsics.len(), + elapsed, + ); + + log::info!( + target: "bench-logistics", + "usage info: {}", + context.backend.usage_info() + .expect("RocksDB backend always provides usage info!"), + ); + + elapsed + } +} \ No newline at end of file diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs new file mode 100644 index 0000000000000000000000000000000000000000..8f045465262856fcb907f9aa5dfee68f7f05fc88 --- /dev/null +++ b/bin/node/bench/src/main.rs @@ -0,0 +1,95 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
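// --- Note (not part of the patch above): a minimal sketch of how the two traits
// defined in bin/node/bench/src/core.rs fit together, mirroring the pattern used by
// ImportBenchmarkDescription/ImportBenchmark in import.rs. The spelled-out signatures
// (Box<Self>, Box<dyn Benchmark>, Cow<'static, str>) are assumptions reconstructed from
// context, and the NoopBenchmarkDescription/NoopBenchmark names are hypothetical,
// introduced only for illustration. Assumes the code lives inside bin/node/bench/src
// so the crate-local `core` module is in scope.
use std::borrow::Cow;
use std::time::{Duration, Instant};
use crate::core::{Benchmark, BenchmarkDescription, Path};

struct NoopBenchmarkDescription;
struct NoopBenchmark;

impl BenchmarkDescription for NoopBenchmarkDescription {
    fn path(&self) -> Path {
        // Benchmarks are filtered by this path, e.g. with `--filter example::noop`.
        Path::new(&["example", "noop"])
    }

    // `setup` consumes the boxed description and returns the runnable benchmark.
    fn setup(self: Box<Self>) -> Box<dyn Benchmark> {
        Box::new(NoopBenchmark)
    }

    fn name(&self) -> Cow<'static, str> {
        "No-op benchmark".into()
    }
}

impl Benchmark for NoopBenchmark {
    // `run_benchmark` calls this 50 times, sorts the samples, and reports both the
    // plain mean (`raw_average`) and a trimmed mean that drops the 10 fastest and
    // 10 slowest samples (`average`).
    fn run(&mut self) -> Duration {
        let start = Instant::now();
        // ...the work under measurement would go here...
        start.elapsed()
    }
}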
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +#[macro_use] mod core; +mod import; + +use crate::core::run_benchmark; +use import::{ImportBenchmarkDescription, SizeType}; +use node_testing::bench::{Profile, KeyTypes}; +use structopt::StructOpt; + +#[derive(Debug, StructOpt)] +#[structopt(name = "node-bench", about = "Node integration benchmarks")] +struct Opt { + /// Show list of all available benchmarks. + /// + /// Will output ("name", "path"). Benchmarks can then be filtered by path. + #[structopt(short, long)] + list: bool, + + /// Machine readable json output. + /// + /// This also suppresses all regular output (except to stderr) + #[structopt(short, long)] + json: bool, + + /// Filter benchmarks. + /// + /// Run with `--list` for the hint of what to filter. + filter: Option, +} + +fn main() { + let opt = Opt::from_args(); + + if !opt.json { + sc_cli::init_logger(""); + } + + let benchmarks = matrix!( + profile in [Profile::Wasm, Profile::Native] => + ImportBenchmarkDescription { + profile: *profile, + key_types: KeyTypes::Sr25519, + size: SizeType::Medium, + }, + ImportBenchmarkDescription { + profile: Profile::Native, + key_types: KeyTypes::Ed25519, + size: SizeType::Medium, + }, + size in [SizeType::Small, SizeType::Large] => + ImportBenchmarkDescription { + profile: Profile::Native, + key_types: KeyTypes::Sr25519, + size: *size, + }, + ); + + if opt.list { + for benchmark in benchmarks.iter() { + log::info!("{}: {}", benchmark.name(), benchmark.path().full()) + } + return; + } + + let mut results = Vec::new(); + for benchmark in benchmarks { + if opt.filter.as_ref().map(|f| benchmark.path().has(f)).unwrap_or(true) { + log::info!("Starting {}", benchmark.name()); + let result = run_benchmark(benchmark); + log::info!("{}", result); + + results.push(result); + } + } + + if opt.json { + let json_result: String = serde_json::to_string(&results).expect("Failed to construct json"); + println!("{}", json_result); + } +} \ No newline at end of file diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index e18b6b228e65553543891d6e5cbcd6fb6094c752..9842fd2ee49ac7650b441970393d3ba440723c64 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -1,8 +1,8 @@ [package] name = "node-cli" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] -description = "Substrate node implementation in Rust." +description = "Generic Substrate node implementation in Rust." 
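// --- Note (not part of the patch above): roughly what the `matrix!` invocation in
// bin/node/bench/src/main.rs expands to, written out by hand as a sketch. Each
// `$var in $array => $item` arm loops over the array and boxes one description per
// element, while a bare item arm pushes a single boxed description. The trait-object
// type `Box<dyn BenchmarkDescription>` is an assumption reconstructed from context.
let mut benchmarks: Vec<Box<dyn BenchmarkDescription>> = Vec::new();
// `profile in [Profile::Wasm, Profile::Native] => ...`
for profile in [Profile::Wasm, Profile::Native].iter() {
    benchmarks.push(Box::new(ImportBenchmarkDescription {
        profile: *profile,
        key_types: KeyTypes::Sr25519,
        size: SizeType::Medium,
    }));
}
// Bare item arm: a single native/ed25519/medium description.
benchmarks.push(Box::new(ImportBenchmarkDescription {
    profile: Profile::Native,
    key_types: KeyTypes::Ed25519,
    size: SizeType::Medium,
}));
// `size in [SizeType::Small, SizeType::Large] => ...`
for size in [SizeType::Small, SizeType::Large].iter() {
    benchmarks.push(Box::new(ImportBenchmarkDescription {
        profile: Profile::Native,
        key_types: KeyTypes::Sr25519,
        size: *size,
    }));
}
// Net result: five descriptions (wasm/sr25519/medium, native/sr25519/medium,
// native/ed25519/medium, native/sr25519/small, native/sr25519/large).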
build = "build.rs" edition = "2018" license = "GPL-3.0" @@ -16,7 +16,7 @@ repository = "https://github.com/paritytech/substrate/" wasm-opt = false [badges] -travis-ci = { repository = "paritytech/substrate", branch = "master" } +travis-ci = { repository = "paritytech/substrate" } maintenance = { status = "actively-developed" } is-it-maintained-issue-resolution = { repository = "paritytech/substrate" } is-it-maintained-open-issues = { repository = "paritytech/substrate" } @@ -31,7 +31,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.2.0" } +codec = { package = "parity-scale-codec", version = "1.3.0" } serde = { version = "1.0.102", features = ["derive"] } futures = { version = "0.3.1", features = ["compat"] } hex-literal = "0.2.1" @@ -42,94 +42,98 @@ structopt = { version = "0.3.8", optional = true } tracing = "0.1.10" # primitives -sp-authority-discovery = { version = "2.0.0-alpha.2", path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/babe" } -grandpa-primitives = { version = "2.0.0-alpha.2", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/timestamp" } -sp-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/finality-tracker" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } -sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } -sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sp-authority-discovery = { version = "2.0.0-alpha.5", path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/babe" } +grandpa-primitives = { version = "2.0.0-alpha.5", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/timestamp" } +sp-finality-tracker = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/finality-tracker" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../../primitives/inherents" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../../primitives/keyring" } +sp-io = { version = "2.0.0-alpha.5", path = "../../../primitives/io" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } # client dependencies -sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } -sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } -sc-chain-spec = { version = "2.0.0-alpha.2", path = "../../../client/chain-spec" } -sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } -sc-network = { version = "0.8.0-alpha.2", path = 
"../../../client/network" } -sc-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../client/consensus/babe" } -grandpa = { version = "0.8.0-alpha.2", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -sc-client-db = { version = "0.8.0-alpha.2", default-features = false, path = "../../../client/db" } -sc-offchain = { version = "2.0.0-alpha.2", path = "../../../client/offchain" } -sc-rpc = { version = "2.0.0-alpha.2", path = "../../../client/rpc" } -sc-basic-authorship = { version = "0.8.0-alpha.2", path = "../../../client/basic-authorship" } -sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../../../client/service" } -sc-tracing = { version = "2.0.0-alpha.2", path = "../../../client/tracing" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../../../client/telemetry" } -sc-authority-discovery = { version = "0.8.0-alpha.2", path = "../../../client/authority-discovery" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../../client/api" } +sc-client = { version = "0.8.0-alpha.5", path = "../../../client/" } +sc-chain-spec = { version = "2.0.0-alpha.5", path = "../../../client/chain-spec" } +sc-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../primitives/transaction-pool" } +sc-network = { version = "0.8.0-alpha.5", path = "../../../client/network" } +sc-consensus-babe = { version = "0.8.0-alpha.5", path = "../../../client/consensus/babe" } +grandpa = { version = "0.8.0-alpha.5", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sc-client-db = { version = "0.8.0-alpha.5", default-features = false, path = "../../../client/db" } +sc-offchain = { version = "2.0.0-alpha.5", path = "../../../client/offchain" } +sc-rpc = { version = "2.0.0-alpha.5", path = "../../../client/rpc" } +sc-basic-authorship = { version = "0.8.0-alpha.5", path = "../../../client/basic-authorship" } +sc-service = { version = "0.8.0-alpha.5", default-features = false, path = "../../../client/service" } +sc-tracing = { version = "2.0.0-alpha.5", path = "../../../client/tracing" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../../../client/telemetry" } +sc-authority-discovery = { version = "0.8.0-alpha.5", path = "../../../client/authority-discovery" } # frame dependencies -pallet-indices = { version = "2.0.0-alpha.2", path = "../../../frame/indices" } -pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/timestamp" } -pallet-contracts = { version = "2.0.0-alpha.2", path = "../../../frame/contracts" } -frame-system = { version = "2.0.0-alpha.2", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/support" } -pallet-im-online = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/im-online" } -pallet-authority-discovery = { version = "2.0.0-alpha.2", path = "../../../frame/authority-discovery" } +pallet-indices = { version = "2.0.0-alpha.5", path = "../../../frame/indices" } +pallet-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/timestamp" } +pallet-contracts = { version = "2.0.0-alpha.5", path = "../../../frame/contracts" } +frame-system 
= { version = "2.0.0-alpha.5", path = "../../../frame/system" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../../../frame/balances" } +pallet-transaction-payment = { version = "2.0.0-alpha.5", path = "../../../frame/transaction-payment" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/support" } +pallet-im-online = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/im-online" } +pallet-authority-discovery = { version = "2.0.0-alpha.5", path = "../../../frame/authority-discovery" } +pallet-staking = { version = "2.0.0-alpha.5", path = "../../../frame/staking" } # node-specific dependencies -node-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } -node-rpc = { version = "2.0.0-alpha.2", path = "../rpc" } -node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } -node-executor = { version = "2.0.0-alpha.2", path = "../executor" } +node-runtime = { version = "2.0.0-alpha.5", path = "../runtime" } +node-rpc = { version = "2.0.0-alpha.5", path = "../rpc" } +node-primitives = { version = "2.0.0-alpha.5", path = "../primitives" } +node-executor = { version = "2.0.0-alpha.5", path = "../executor" } # CLI-specific dependencies -sc-cli = { version = "0.8.0-alpha.2", optional = true, path = "../../../client/cli" } -frame-benchmarking-cli = { version = "2.0.0-alpha.2", optional = true, path = "../../../utils/frame/benchmarking-cli" } -node-transaction-factory = { version = "0.8.0-alpha.2", optional = true, path = "../transaction-factory" } -node-inspect = { version = "0.8.0-alpha.2", optional = true, path = "../inspect" } +sc-cli = { version = "0.8.0-alpha.5", optional = true, path = "../../../client/cli" } +frame-benchmarking-cli = { version = "2.0.0-alpha.5", optional = true, path = "../../../utils/frame/benchmarking-cli" } +node-transaction-factory = { version = "0.8.0-alpha.5", optional = true, path = "../transaction-factory" } +node-inspect = { version = "0.8.0-alpha.5", optional = true, path = "../inspect" } # WASM-specific dependencies wasm-bindgen = { version = "0.2.57", optional = true } wasm-bindgen-futures = { version = "0.4.7", optional = true } -browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0-alpha.2" } +browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0-alpha.5"} + +[target.'cfg(target_arch="x86_64")'.dependencies] +node-executor = { version = "2.0.0-alpha.4", path = "../executor", features = [ "wasmtime" ] } +sc-cli = { version = "0.8.0-alpha.4", optional = true, path = "../../../client/cli", features = [ "wasmtime" ] } +sc-service = { version = "0.8.0-alpha.4", default-features = false, path = "../../../client/service", features = [ "wasmtime" ] } [dev-dependencies] -sc-keystore = { version = "2.0.0-alpha.2", path = "../../../client/keystore" } -sc-consensus-babe = { version = "0.8.0-alpha.2", features = ["test-helpers"], path = "../../../client/consensus/babe" } -sc-consensus-epochs = { version = "0.8.0-alpha.2", path = "../../../client/consensus/epochs" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../../../client/keystore" } +sc-consensus-babe = { version = "0.8.0-alpha.5", features = ["test-helpers"], path = "../../../client/consensus/babe" } +sc-consensus-epochs = { version = "0.8.0-alpha.5", path = "../../../client/consensus/epochs" } sc-service-test = { version = "2.0.0-dev", path = "../../../client/service/test" } 
-futures = "0.3.1" +futures = "0.3.4" tempfile = "3.1.0" -assert_cmd = "0.12" +assert_cmd = "1.0" nix = "0.17" serde_json = "1.0" +regex = "1" +platforms = "0.2.1" [build-dependencies] -build-script-utils = { version = "2.0.0-alpha.2", package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } structopt = { version = "0.3.8", optional = true } -node-transaction-factory = { version = "0.8.0-alpha.2", optional = true, path = "../transaction-factory" } -node-inspect = { version = "0.8.0-alpha.2", optional = true, path = "../inspect" } -frame-benchmarking-cli = { version = "2.0.0-alpha.2", optional = true, path = "../../../utils/frame/benchmarking-cli" } +node-transaction-factory = { version = "0.8.0-alpha.5", optional = true, path = "../transaction-factory" } +node-inspect = { version = "0.8.0-alpha.5", optional = true, path = "../inspect" } +frame-benchmarking-cli = { version = "2.0.0-alpha.5", optional = true, path = "../../../utils/frame/benchmarking-cli" } +substrate-build-script-utils = { version = "2.0.0-alpha.5", optional = true, path = "../../../utils/build-script-utils" } [build-dependencies.sc-cli] -version = "0.8.0-alpha.2" +version = "0.8.0-alpha.5" package = "sc-cli" path = "../../../client/cli" optional = true -[build-dependencies.vergen] -version = "3.0.4" -optional = true - [features] -default = ["cli", "wasmtime"] +default = [ "cli" ] browser = [ "browser-utils", "wasm-bindgen", @@ -143,12 +147,9 @@ cli = [ "frame-benchmarking-cli", "sc-service/rocksdb", "structopt", - "vergen", -] -wasmtime = [ - "cli", - "node-executor/wasmtime", - "sc-cli/wasmtime", - "sc-service/wasmtime", + "substrate-build-script-utils", ] runtime-benchmarks = [ "node-runtime/runtime-benchmarks" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node/cli/bin/main.rs b/bin/node/cli/bin/main.rs index 8c4412667baceec56904864413178cbc88001497..cfad84a4cb5520f2542dc18da771d4cfa4a0a9ed 100644 --- a/bin/node/cli/bin/main.rs +++ b/bin/node/cli/bin/main.rs @@ -19,16 +19,5 @@ #![warn(missing_docs)] fn main() -> sc_cli::Result<()> { - let version = sc_cli::VersionInfo { - name: "Substrate Node", - commit: env!("VERGEN_SHA_SHORT"), - version: env!("CARGO_PKG_VERSION"), - executable_name: "substrate", - author: "Parity Technologies ", - description: "Generic substrate node", - support_url: "https://github.com/paritytech/substrate/issues/new", - copyright_start_year: 2017, - }; - - node_cli::run(std::env::args(), version) + node_cli::run() } diff --git a/bin/node/cli/build.rs b/bin/node/cli/build.rs index e824b59be64f3dedba3b738a6a3908b9e48b5091..12e0cab58ada5797c069e9a24970be6daa06de4f 100644 --- a/bin/node/cli/build.rs +++ b/bin/node/cli/build.rs @@ -24,14 +24,14 @@ mod cli { include!("src/cli.rs"); use std::{fs, env, path::Path}; - use sc_cli::{structopt::clap::Shell}; - use vergen::{ConstantsFlags, generate_cargo_keys}; + use sc_cli::structopt::clap::Shell; + use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; pub fn main() { build_shell_completion(); - generate_cargo_keys(ConstantsFlags::all()).expect("Failed to generate metadata files"); + generate_cargo_keys(); - build_script_utils::rerun_if_git_head_changed(); + rerun_if_git_head_changed(); } /// Build shell completion scripts for all known shells diff --git a/bin/node/cli/res/flaming-fir.json b/bin/node/cli/res/flaming-fir.json index 573a9e3aa54a40e45b260629c63ec4666353aebe..7ed98239b54b62c2a3c6309d96b13dedcdb1e65d 100644 --- a/bin/node/cli/res/flaming-fir.json 
+++ b/bin/node/cli/res/flaming-fir.json @@ -23,7 +23,9 @@ "tokenSymbol": "FIR" }, "forkBlocks": null, - "badBlocks": null, + "badBlocks": [ + "0xf3b02820f81988282e1da41fd479ef2aa00d63d622863639ea15d48ab6533fdc" + ], "consensusEngine": null, "genesis": { "raw": { diff --git a/bin/node/cli/src/browser.rs b/bin/node/cli/src/browser.rs index e79ce273a32cc2ffde7ec1194fdbd81002e2fa25..6cd98dfe8d0d8ef0561406cb6a6b68fa6a3a0e88 100644 --- a/bin/node/cli/src/browser.rs +++ b/bin/node/cli/src/browser.rs @@ -41,11 +41,11 @@ async fn start_inner(chain_spec: String, log_level: String) -> Result ChainSpec { ChainSpec::from_genesis( "Staging Testnet", "staging_testnet", + ChainType::Live, staging_testnet_config_genesis, boot_nodes, - Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)])), + Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)]) + .expect("Staging telemetry url is valid; qed")), None, None, Default::default(), @@ -181,7 +184,7 @@ pub fn get_account_id_from_seed(seed: &str) -> AccountId where } /// Helper function to generate stash, controller and session key from seed -pub fn get_authority_keys_from_seed(seed: &str) -> ( +pub fn authority_keys_from_seed(seed: &str) -> ( AccountId, AccountId, GrandpaId, @@ -201,7 +204,14 @@ pub fn get_authority_keys_from_seed(seed: &str) -> ( /// Helper function to create GenesisConfig for testing pub fn testnet_genesis( - initial_authorities: Vec<(AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId)>, + initial_authorities: Vec<( + AccountId, + AccountId, + GrandpaId, + BabeId, + ImOnlineId, + AuthorityDiscoveryId, + )>, root_key: AccountId, endowed_accounts: Option>, enable_println: bool, @@ -243,7 +253,12 @@ pub fn testnet_genesis( }), pallet_session: Some(SessionConfig { keys: initial_authorities.iter().map(|x| { - (x.0.clone(), x.0.clone(), session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone())) + (x.0.clone(), x.0.clone(), session_keys( + x.2.clone(), + x.3.clone(), + x.4.clone(), + x.5.clone(), + )) }).collect::>(), }), pallet_staking: Some(StakingConfig { @@ -257,13 +272,14 @@ pub fn testnet_genesis( .. 
Default::default() }), pallet_democracy: Some(DemocracyConfig::default()), - pallet_collective_Instance1: Some(CouncilConfig { + pallet_elections_phragmen: Some(ElectionsConfig { members: endowed_accounts.iter() .take((num_endowed_accounts + 1) / 2) .cloned() + .map(|member| (member, STASH)) .collect(), - phantom: Default::default(), }), + pallet_collective_Instance1: Some(CouncilConfig::default()), pallet_collective_Instance2: Some(TechnicalCommitteeConfig { members: endowed_accounts.iter() .take((num_endowed_accounts + 1) / 2) @@ -310,7 +326,7 @@ pub fn testnet_genesis( fn development_config_genesis() -> GenesisConfig { testnet_genesis( vec![ - get_authority_keys_from_seed("Alice"), + authority_keys_from_seed("Alice"), ], get_account_id_from_seed::("Alice"), None, @@ -323,6 +339,7 @@ pub fn development_config() -> ChainSpec { ChainSpec::from_genesis( "Development", "dev", + ChainType::Development, development_config_genesis, vec![], None, @@ -335,8 +352,8 @@ pub fn development_config() -> ChainSpec { fn local_testnet_genesis() -> GenesisConfig { testnet_genesis( vec![ - get_authority_keys_from_seed("Alice"), - get_authority_keys_from_seed("Bob"), + authority_keys_from_seed("Alice"), + authority_keys_from_seed("Bob"), ], get_account_id_from_seed::("Alice"), None, @@ -349,6 +366,7 @@ pub fn local_testnet_config() -> ChainSpec { ChainSpec::from_genesis( "Local Testnet", "local_testnet", + ChainType::Local, local_testnet_genesis, vec![], None, @@ -368,7 +386,7 @@ pub(crate) mod tests { fn local_testnet_genesis_instant_single() -> GenesisConfig { testnet_genesis( vec![ - get_authority_keys_from_seed("Alice"), + authority_keys_from_seed("Alice"), ], get_account_id_from_seed::("Alice"), None, @@ -381,6 +399,7 @@ pub(crate) mod tests { ChainSpec::from_genesis( "Integration Test", "test", + ChainType::Development, local_testnet_genesis_instant_single, vec![], None, @@ -395,6 +414,7 @@ pub(crate) mod tests { ChainSpec::from_genesis( "Integration Test", "test", + ChainType::Development, local_testnet_genesis, vec![], None, diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index b6db9c3deb7e31f8f8da62d30b6971bf572b30d1..44b18fd716337c0857ebdf50f442001db12034c0 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sc_cli::{SharedParams, ImportParams, RunCmd}; +use sc_cli::{ImportParams, RunCmd, SharedParams}; use structopt::StructOpt; /// An overarching CLI command definition. @@ -50,10 +50,7 @@ pub enum Subcommand { Inspect(node_inspect::cli::InspectCmd), /// The custom benchmark subcommmand benchmarking runtime pallets. - #[structopt( - name = "benchmark", - about = "Benchmark runtime pallets." - )] + #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] Benchmark(frame_benchmarking_cli::BenchmarkCmd), } @@ -62,11 +59,11 @@ pub enum Subcommand { #[derive(Debug, StructOpt, Clone)] pub struct FactoryCmd { /// Number of blocks to generate. - #[structopt(long="blocks", default_value = "1")] + #[structopt(long = "blocks", default_value = "1")] pub blocks: u32, /// Number of transactions to push per block. 
- #[structopt(long="transactions", default_value = "8")] + #[structopt(long = "transactions", default_value = "8")] pub transactions: u32, #[allow(missing_docs)] diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 21584f0898466dda3fca348bb40056b1436b43ac..37b77d3bb7475e31202d7592b4998e2c564084d5 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -14,102 +14,126 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sc_cli::VersionInfo; -use sc_service::{Roles as ServiceRoles}; +use crate::{chain_spec, factory_impl::FactoryState, service, Cli, FactoryCmd, Subcommand}; +use node_executor::Executor; +use node_runtime::{Block, RuntimeApi}; use node_transaction_factory::RuntimeAdapter; -use crate::{Cli, service, ChainSpec, load_spec, Subcommand, factory_impl::FactoryState}; +use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams, SubstrateCli}; +use sc_service::Configuration; + +impl SubstrateCli for Cli { + fn impl_name() -> &'static str { + "Substrate Node" + } + + fn impl_version() -> &'static str { + env!("SUBSTRATE_CLI_IMPL_VERSION") + } + + fn description() -> &'static str { + env!("CARGO_PKG_DESCRIPTION") + } + + fn author() -> &'static str { + env!("CARGO_PKG_AUTHORS") + } + + fn support_url() -> &'static str { + "https://github.com/paritytech/substrate/issues/new" + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn executable_name() -> &'static str { + "substrate" + } + + fn load_spec(&self, id: &str) -> std::result::Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()), + "local" => Box::new(chain_spec::local_testnet_config()), + "" | "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), + "staging" => Box::new(chain_spec::staging_testnet_config()), + path => Box::new(chain_spec::ChainSpec::from_json_file( + std::path::PathBuf::from(path), + )?), + }) + } +} /// Parse command line arguments into service configuration. 
-pub fn run(args: I, version: VersionInfo) -> sc_cli::Result<()> -where - I: Iterator, - T: Into + Clone, -{ - let args: Vec<_> = args.collect(); - let opt = sc_cli::from_iter::(args.clone(), &version); +pub fn run() -> Result<()> { + sc_cli::reset_signal_pipe_handler()?; - let mut config = sc_service::Configuration::from_version(&version); + let cli = Cli::from_args(); - match opt.subcommand { + match &cli.subcommand { None => { - opt.run.init(&version)?; - opt.run.update_config(&mut config, load_spec, &version)?; - opt.run.run( - config, - service::new_light, - service::new_full, - &version, - ) - }, + let runner = cli.create_runner(&cli.run)?; + runner.run_node(service::new_light, service::new_full) + } Some(Subcommand::Inspect(cmd)) => { - cmd.init(&version)?; - cmd.update_config(&mut config, load_spec, &version)?; - - let client = sc_service::new_full_client::< - node_runtime::Block, node_runtime::RuntimeApi, node_executor::Executor, - >(&config)?; - let inspect = node_inspect::Inspector::::new(client); + let runner = cli.create_runner(cmd)?; - cmd.run(inspect) - }, + runner.sync_run(|config| cmd.run::(config)) + } Some(Subcommand::Benchmark(cmd)) => { - cmd.init(&version)?; - cmd.update_config(&mut config, load_spec, &version)?; - - cmd.run::(config) - }, - Some(Subcommand::Factory(cli_args)) => { - cli_args.shared_params.init(&version)?; - cli_args.shared_params.update_config(&mut config, load_spec, &version)?; - cli_args.import_params.update_config( - &mut config, - ServiceRoles::FULL, - cli_args.shared_params.dev, - )?; - - config.use_in_memory_keystore()?; - - match ChainSpec::from(config.expect_chain_spec().id()) { - Some(ref c) if c == &ChainSpec::Development || c == &ChainSpec::LocalTestnet => {}, - _ => return Err( - "Factory is only supported for development and local testnet.".into() - ), - } + let runner = cli.create_runner(cmd)?; - // Setup tracing. 
- if let Some(tracing_targets) = cli_args.import_params.tracing_targets.as_ref() { - let subscriber = sc_tracing::ProfilingSubscriber::new( - cli_args.import_params.tracing_receiver.into(), tracing_targets - ); - if let Err(e) = tracing::subscriber::set_global_default(subscriber) { - return Err( - format!("Unable to set global default subscriber {}", e).into() - ); - } - } + runner.sync_run(|config| cmd.run::(config)) + } + Some(Subcommand::Factory(cmd)) => { + let runner = cli.create_runner(cmd)?; - let factory_state = FactoryState::new( - cli_args.blocks, - cli_args.transactions, - ); + runner.sync_run(|config| cmd.run(config)) + } + Some(Subcommand::Base(subcommand)) => { + let runner = cli.create_runner(subcommand)?; + + runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) + } + } +} - let service_builder = new_full_start!(config).0; - node_transaction_factory::factory( - factory_state, - service_builder.client(), - service_builder.select_chain() - .expect("The select_chain is always initialized by new_full_start!; QED") - ).map_err(|e| format!("Error in transaction factory: {}", e))?; +impl CliConfiguration for FactoryCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - Ok(()) - }, - Some(Subcommand::Base(subcommand)) => { - subcommand.init(&version)?; - subcommand.update_config(&mut config, load_spec, &version)?; - subcommand.run( - config, - |config: sc_service::Configuration| Ok(new_full_start!(config).0), - ) - }, + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) + } +} + +impl FactoryCmd { + fn run(&self, config: Configuration) -> Result<()> { + match config.chain_spec.id() { + "dev" | "local" => {} + _ => return Err("Factory is only supported for development and local testnet.".into()), + } + + // Setup tracing. + if let Some(tracing_targets) = self.import_params.tracing_targets.as_ref() { + let subscriber = sc_tracing::ProfilingSubscriber::new( + self.import_params.tracing_receiver.into(), + tracing_targets, + ); + if let Err(e) = tracing::subscriber::set_global_default(subscriber) { + return Err(format!("Unable to set global default subscriber {}", e).into()); + } + } + + let factory_state = FactoryState::new(self.blocks, self.transactions); + + let service_builder = new_full_start!(config).0; + node_transaction_factory::factory( + factory_state, + service_builder.client(), + service_builder + .select_chain() + .expect("The select_chain is always initialized by new_full_start!; qed"), + ) } } diff --git a/bin/node/cli/src/lib.rs b/bin/node/cli/src/lib.rs index 6b3644856c6b027172ad62b132c65fcb92d05146..1e2c790bfa7d21a8857ba1bf2f90a3a419701585 100644 --- a/bin/node/cli/src/lib.rs +++ b/bin/node/cli/src/lib.rs @@ -47,45 +47,3 @@ pub use browser::*; pub use cli::*; #[cfg(feature = "cli")] pub use command::*; - -/// The chain specification option. -#[derive(Clone, Debug, PartialEq)] -pub enum ChainSpec { - /// Whatever the current runtime is, with just Alice as an auth. - Development, - /// Whatever the current runtime is, with simple Alice/Bob auths. - LocalTestnet, - /// The Flaming Fir testnet. - FlamingFir, - /// Whatever the current runtime is with the "global testnet" defaults. - StagingTestnet, -} - -/// Get a chain config from a spec setting. 
-impl ChainSpec { - pub(crate) fn load(self) -> Result { - Ok(match self { - ChainSpec::FlamingFir => chain_spec::flaming_fir_config()?, - ChainSpec::Development => chain_spec::development_config(), - ChainSpec::LocalTestnet => chain_spec::local_testnet_config(), - ChainSpec::StagingTestnet => chain_spec::staging_testnet_config(), - }) - } - - pub(crate) fn from(s: &str) -> Option { - match s { - "dev" => Some(ChainSpec::Development), - "local" => Some(ChainSpec::LocalTestnet), - "" | "fir" | "flaming-fir" => Some(ChainSpec::FlamingFir), - "staging" => Some(ChainSpec::StagingTestnet), - _ => None, - } - } -} - -fn load_spec(id: &str) -> Result, String> { - Ok(match ChainSpec::from(id) { - Some(spec) => Box::new(spec.load()?), - None => Box::new(chain_spec::ChainSpec::from_json_file(std::path::PathBuf::from(id))?), - }) -} diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 572221b6a51e838094047d2ddcd67a88212dfb16..257068cf144ac858036e42964ce5b8d6257935fd 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -88,7 +88,7 @@ macro_rules! new_full_start { import_setup = Some((block_import, grandpa_link, babe_link)); Ok(import_queue) })? - .with_rpc_extensions(|builder| -> Result { + .with_rpc_extensions(|builder| -> std::result::Result { let babe_link = import_setup.as_ref().map(|s| &s.2) .expect("BabeLink is present for full services or set up failed; qed."); let deps = node_rpc::FullDeps { @@ -120,24 +120,17 @@ macro_rules! new_full { use sc_client_api::ExecutorProvider; let ( - is_authority, + role, force_authoring, name, disable_grandpa, - sentry_nodes, ) = ( - $config.roles.is_authority(), + $config.role.clone(), $config.force_authoring, - $config.name.clone(), + $config.network.node_name.clone(), $config.disable_grandpa, - $config.network.sentry_nodes.clone(), ); - // sentry nodes announce themselves as authorities to the network - // and should run the same protocols authorities do, but it should - // never actively participate in any consensus process. - let participates_in_consensus = is_authority && !$config.sentry_mode; - let (builder, mut import_setup, inherent_data_providers) = new_full_start!($config); let service = builder @@ -153,7 +146,7 @@ macro_rules! new_full { ($with_startup_data)(&block_import, &babe_link); - if participates_in_consensus { + if let sc_service::config::Role::Authority { sentry_nodes } = &role { let proposer = sc_basic_authorship::ProposerFactory::new( service.client(), service.transaction_pool() @@ -190,7 +183,7 @@ macro_rules! new_full { let authority_discovery = sc_authority_discovery::AuthorityDiscovery::new( service.client(), network, - sentry_nodes, + sentry_nodes.clone(), service.keystore(), dht_event_stream, service.prometheus_registry(), @@ -201,7 +194,7 @@ macro_rules! new_full { // if the node isn't actively participating in consensus then it doesn't // need a keystore, regardless of which protocol we use below. - let keystore = if participates_in_consensus { + let keystore = if role.is_authority() { Some(service.keystore()) } else { None @@ -214,7 +207,7 @@ macro_rules! 
new_full { name: Some(name), observer_enabled: false, keystore, - is_authority, + is_authority: role.is_network_authority(), }; let enable_grandpa = !disable_grandpa; @@ -397,6 +390,7 @@ mod tests { use sc_service::AbstractService; use crate::service::{new_full, new_light}; use sp_runtime::traits::IdentifyAccount; + use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; type AccountPublic = ::Signer; @@ -414,7 +408,21 @@ mod tests { let dummy_runtime = ::tokio::runtime::Runtime::new().unwrap(); let block_factory = |service: &::FullService| { let block_id = BlockId::number(service.client().chain_info().best_number); - let parent_header = service.client().header(&block_id).unwrap().unwrap(); + let parent_header = service.client().best_header(&block_id) + .expect("db error") + .expect("best block should exist"); + + futures::executor::block_on( + service.transaction_pool().maintain( + ChainEvent::NewBlock { + is_new_best: true, + id: block_id.clone(), + retracted: vec![], + header: parent_header, + }, + ) + ); + let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone()); let proposer_factory = consensus::ProposerFactory { client: service.client().clone(), @@ -464,6 +472,8 @@ mod tests { } #[test] + // It is "ignored", but the node-cli ignored tests are running on the CI. + // This can be run locally with `cargo test --release -p node-cli test_sync -- --ignored`. #[ignore] fn test_sync() { let keystore_path = tempfile::tempdir().expect("Creates keystore path"); @@ -504,17 +514,28 @@ mod tests { let parent_header = service.client().header(&parent_id).unwrap().unwrap(); let parent_hash = parent_header.hash(); let parent_number = *parent_header.number(); + + futures::executor::block_on( + service.transaction_pool().maintain( + ChainEvent::NewBlock { + is_new_best: true, + id: parent_id.clone(), + retracted: vec![], + header: parent_header.clone(), + }, + ) + ); + let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( service.client(), service.transaction_pool() ); - let epoch = babe_link.epoch_changes().lock().epoch_for_child_of( + let epoch_descriptor = babe_link.epoch_changes().lock().epoch_descriptor_for_child_of( descendent_query(&*service.client()), &parent_hash, parent_number, slot_num, - |slot| babe_link.config().genesis_epoch(slot) ).unwrap().unwrap(); let mut digest = Digest::::default(); @@ -564,7 +585,7 @@ mod tests { params.body = Some(new_body); params.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate { epoch }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, ); params.fork_choice = Some(ForkChoiceStrategy::LongestChain); diff --git a/bin/node/cli/tests/version.rs b/bin/node/cli/tests/version.rs new file mode 100644 index 0000000000000000000000000000000000000000..5555efd3854d4e08ecae60dd0c57ecf05b22e040 --- /dev/null +++ b/bin/node/cli/tests/version.rs @@ -0,0 +1,83 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use assert_cmd::cargo::cargo_bin; +use platforms::*; +use regex::Regex; +use std::process::Command; + +fn expected_regex() -> Regex { + Regex::new(r"^substrate (\d+\.\d+\.\d+(?:-.+?)?)-([a-f\d]+)-(.+?)-(.+?)(?:-(.+))?$").unwrap() +} + +#[test] +fn version_is_full() { + let expected = expected_regex(); + let output = Command::new(cargo_bin("substrate")) + .args(&["--version"]) + .output() + .unwrap(); + + assert!( + output.status.success(), + "command returned with non-success exit code" + ); + + let output = String::from_utf8_lossy(&output.stdout).trim().to_owned(); + let captures = expected + .captures(output.as_str()) + .expect("could not parse version in output"); + + assert_eq!(&captures[1], env!("CARGO_PKG_VERSION")); + assert_eq!(&captures[3], TARGET_ARCH.as_str()); + assert_eq!(&captures[4], TARGET_OS.as_str()); + assert_eq!( + captures.get(5).map(|x| x.as_str()), + TARGET_ENV.map(|x| x.as_str()) + ); +} + +#[test] +fn test_regex_matches_properly() { + let expected = expected_regex(); + + let captures = expected + .captures("substrate 2.0.0-da487d19d-x86_64-linux-gnu") + .unwrap(); + assert_eq!(&captures[1], "2.0.0"); + assert_eq!(&captures[2], "da487d19d"); + assert_eq!(&captures[3], "x86_64"); + assert_eq!(&captures[4], "linux"); + assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); + + let captures = expected + .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux-gnu") + .unwrap(); + assert_eq!(&captures[1], "2.0.0-alpha.5"); + assert_eq!(&captures[2], "da487d19d"); + assert_eq!(&captures[3], "x86_64"); + assert_eq!(&captures[4], "linux"); + assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); + + let captures = expected + .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux") + .unwrap(); + assert_eq!(&captures[1], "2.0.0-alpha.5"); + assert_eq!(&captures[2], "da487d19d"); + assert_eq!(&captures[3], "x86_64"); + assert_eq!(&captures[4], "linux"); + assert_eq!(captures.get(5).map(|x| x.as_str()), None); +} diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index a77efcf7f85260389800ca7f1f8018e0784f7324..2f1060a99884505ba94f1692f493bf8cdb01f43a 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-executor" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] description = "Substrate node implementation in Rust." 
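For reference, a minimal standalone sketch of how the version-string pattern introduced in tests/version.rs above takes a version line apart (same `regex` crate the test uses; the sample string is illustrative only):

use regex::Regex;

fn main() {
    // Same pattern as expected_regex() above: crate version, short commit
    // hash, target architecture, target OS, and an optional target env.
    let re = Regex::new(
        r"^substrate (\d+\.\d+\.\d+(?:-.+?)?)-([a-f\d]+)-(.+?)-(.+?)(?:-(.+))?$",
    ).unwrap();

    // Illustrative sample of what `substrate --version` might print.
    let sample = "substrate 2.0.0-alpha.5-da487d19d-x86_64-linux-gnu";
    let caps = re.captures(sample).expect("sample matches the pattern");

    assert_eq!(&caps[1], "2.0.0-alpha.5"); // crate version (with pre-release tag)
    assert_eq!(&caps[2], "da487d19d");     // commit hash
    assert_eq!(&caps[3], "x86_64");        // target architecture
    assert_eq!(&caps[4], "linux");         // target OS
    assert_eq!(caps.get(5).map(|m| m.as_str()), Some("gnu")); // optional target env
}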
edition = "2018" @@ -9,34 +9,34 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0" } -node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } -node-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } -sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } -sp-trie = { version = "2.0.0-alpha.2", path = "../../../primitives/trie" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +node-primitives = { version = "2.0.0-alpha.5", path = "../primitives" } +node-runtime = { version = "2.0.0-alpha.5", path = "../runtime" } +sc-executor = { version = "0.8.0-alpha.5", path = "../../../client/executor" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-alpha.5", path = "../../../primitives/io" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../../primitives/state-machine" } +sp-trie = { version = "2.0.0-alpha.5", path = "../../../primitives/trie" } trie-root = "0.16.0" -frame-benchmarking = { version = "2.0.0-alpha.2", path = "../../../frame/benchmarking" } +frame-benchmarking = { version = "2.0.0-alpha.5", path = "../../../frame/benchmarking" } [dev-dependencies] criterion = "0.3.0" -frame-support = { version = "2.0.0-alpha.2", path = "../../../frame/support" } -frame-system = { version = "2.0.0-alpha.2", path = "../../../frame/system" } -node-testing = { version = "2.0.0-alpha.2", path = "../testing" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../../../frame/balances" } -pallet-contracts = { version = "2.0.0-alpha.2", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0-alpha.2", path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0-alpha.2", path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0-alpha.2", path = "../../../frame/indices" } -pallet-session = { version = "2.0.0-alpha.2", path = "../../../frame/session" } -pallet-timestamp = { version = "2.0.0-alpha.2", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0-alpha.2", path = "../../../frame/treasury" } -sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../../primitives/application-crypto" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-externalities = { version = "0.8.0-alpha.3", path = "../../../primitives/externalities" } +frame-support = { version = "2.0.0-alpha.5", path = "../../../frame/support" } +frame-system = { version = "2.0.0-alpha.5", path = "../../../frame/system" } +node-testing = { version = "2.0.0-alpha.5", path = "../testing" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../../../frame/balances" } +pallet-contracts = { version = "2.0.0-alpha.5", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0-alpha.5", path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0-alpha.5", path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0-alpha.5", path = "../../../frame/indices" } +pallet-session = { version = "2.0.0-alpha.5", path = 
"../../../frame/session" } +pallet-timestamp = { version = "2.0.0-alpha.5", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-alpha.5", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0-alpha.5", path = "../../../frame/treasury" } +sp-application-crypto = { version = "2.0.0-alpha.5", path = "../../../primitives/application-crypto" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-externalities = { version = "0.8.0-alpha.5", path = "../../../primitives/externalities" } substrate-test-client = { version = "2.0.0-dev", path = "../../../test-utils/client" } wabt = "0.9.2" @@ -52,3 +52,6 @@ stress-test = [] [[bench]] name = "bench" harness = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 1ee0a17c81120ca32a7a9af2d704ce97699ee454..fccf4a62cc21dbe81f3fa72f6ab2fde8ae6fceda 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -338,7 +338,7 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 10000, class: DispatchClass::Operational, pays_fee: true } + DispatchInfo { weight: 10000, class: DispatchClass::Mandatory, pays_fee: true } )), topics: vec![], }, @@ -391,7 +391,7 @@ fn full_native_block_import_works() { EventRecord { phase: Phase::ApplyExtrinsic(0), event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 10000, class: DispatchClass::Operational, pays_fee: true } + DispatchInfo { weight: 10000, class: DispatchClass::Mandatory, pays_fee: true } )), topics: vec![], }, diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index 1a92aeca6ba77b61597f3f0d19317f67adca2b03..536cf486e38ae28e0df13fadd65ac9b5c20e99a0 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -15,7 +15,7 @@ // along with Substrate. If not, see . 
use node_runtime::{ - Call, Executive, Indices, Runtime, SubmitTransaction, UncheckedExtrinsic, + Call, Executive, Indices, Runtime, TransactionSubmitterOf, UncheckedExtrinsic, }; use sp_application_crypto::AppKey; use sp_core::testing::KeyStore; @@ -31,6 +31,8 @@ use codec::Decode; pub mod common; use self::common::*; +type SubmitTransaction = TransactionSubmitterOf; + #[test] fn should_submit_unsigned_transaction() { let mut t = new_test_ext(COMPACT_CODE, false); @@ -138,7 +140,7 @@ fn should_submit_signed_twice_from_the_same_account() { fn submitted_transaction_should_be_valid() { use codec::Encode; use frame_support::storage::StorageMap; - use sp_runtime::transaction_validity::ValidTransaction; + use sp_runtime::transaction_validity::{ValidTransaction, TransactionSource}; use sp_runtime::traits::StaticLookup; let mut t = new_test_ext(COMPACT_CODE, false); @@ -163,6 +165,7 @@ fn submitted_transaction_should_be_valid() { let tx0 = state.read().transactions[0].clone(); let mut t = new_test_ext(COMPACT_CODE, false); t.execute_with(|| { + let source = TransactionSource::External; let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap(); // add balance to the account let author = extrinsic.signature.clone().unwrap().0; @@ -172,15 +175,14 @@ fn submitted_transaction_should_be_valid() { >::insert(&address, account); // check validity - let res = Executive::validate_transaction(extrinsic); + let res = Executive::validate_transaction(source, extrinsic); assert_eq!(res.unwrap(), ValidTransaction { priority: 2_411_002_000_000, requires: vec![], provides: vec![(address, 0).encode()], - longevity: 127, + longevity: 128, propagate: true, }); }); } - diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index 022f4d0ca49f5252cedce67ea6750a4c5aae8a18..9e94fe74d68469d9570fe76292d004e9106ef049 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-inspect" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -8,13 +8,16 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0" } +codec = { package = "parity-scale-codec", version = "1.3.0" } derive_more = "0.99" log = "0.4.8" -sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } -sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../../../client/service" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sc-cli = { version = "0.8.0-alpha.5", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../../client/api" } +sc-service = { version = "0.8.0-alpha.5", default-features = false, path = "../../../client/service" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } structopt = "0.3.8" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node/inspect/src/command.rs b/bin/node/inspect/src/command.rs index 
16bfda2bd927a8890d3416cfe8b75e96d216b117..2212907f763138feef1607b2236bdbbf7e0867a3 100644 --- a/bin/node/inspect/src/command.rs +++ b/bin/node/inspect/src/command.rs @@ -16,186 +16,48 @@ //! Command ran by the CLI -use std::{ - fmt::Debug, - str::FromStr, -}; - use crate::cli::{InspectCmd, InspectSubCmd}; -use crate::{Inspector, PrettyPrinter}; +use crate::Inspector; +use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams}; +use sc_service::{new_full_client, Configuration, NativeExecutionDispatch}; +use sp_runtime::traits::Block; +use std::str::FromStr; impl InspectCmd { - /// Initialize - pub fn init(&self, version: &sc_cli::VersionInfo) -> sc_cli::Result<()> { - self.shared_params.init(version) - } - - /// Parse CLI arguments and initialize given config. - pub fn update_config( - &self, - mut config: &mut sc_service::config::Configuration, - spec_factory: impl FnOnce(&str) -> Result, String>, - version: &sc_cli::VersionInfo, - ) -> sc_cli::Result<()> { - self.shared_params.update_config(config, spec_factory, version)?; - - // make sure to configure keystore - config.use_in_memory_keystore()?; - - // and all import params (especially pruning that has to match db meta) - self.import_params.update_config( - &mut config, - sc_service::Roles::FULL, - self.shared_params.dev, - )?; - - Ok(()) - } - /// Run the inspect command, passing the inspector. - pub fn run( - self, - inspect: Inspector, - ) -> sc_cli::Result<()> where - B: sp_runtime::traits::Block, + pub fn run(&self, config: Configuration) -> Result<()> + where + B: Block, B::Hash: FromStr, - P: PrettyPrinter, + RA: Send + Sync + 'static, + EX: NativeExecutionDispatch + 'static, { - match self.command { + let client = new_full_client::(&config)?; + let inspect = Inspector::::new(client); + + match &self.command { InspectSubCmd::Block { input } => { let input = input.parse()?; - let res = inspect.block(input) - .map_err(|e| format!("{}", e))?; + let res = inspect.block(input).map_err(|e| format!("{}", e))?; println!("{}", res); Ok(()) - }, + } InspectSubCmd::Extrinsic { input } => { let input = input.parse()?; - let res = inspect.extrinsic(input) - .map_err(|e| format!("{}", e))?; + let res = inspect.extrinsic(input).map_err(|e| format!("{}", e))?; println!("{}", res); Ok(()) - }, - } - } -} - -/// A block to retrieve. -#[derive(Debug, Clone, PartialEq)] -pub enum BlockAddress { - /// Get block by hash. - Hash(Hash), - /// Get block by number. - Number(Number), - /// Raw SCALE-encoded bytes. - Bytes(Vec), -} - -impl FromStr for BlockAddress { - type Err = String; - - fn from_str(s: &str) -> Result { - // try to parse hash first - if let Ok(hash) = s.parse() { - return Ok(Self::Hash(hash)) - } - - // then number - if let Ok(number) = s.parse() { - return Ok(Self::Number(number)) + } } - - // then assume it's bytes (hex-encoded) - sp_core::bytes::from_hex(s) - .map(Self::Bytes) - .map_err(|e| format!( - "Given string does not look like hash or number. It could not be parsed as bytes either: {}", - e - )) - } -} - -/// An extrinsic address to decode and print out. -#[derive(Debug, Clone, PartialEq)] -pub enum ExtrinsicAddress { - /// Extrinsic as part of existing block. - Block(BlockAddress, usize), - /// Raw SCALE-encoded extrinsic bytes. 
- Bytes(Vec), -} - -impl FromStr for ExtrinsicAddress { - type Err = String; - - fn from_str(s: &str) -> Result { - // first try raw bytes - if let Ok(bytes) = sp_core::bytes::from_hex(s).map(Self::Bytes) { - return Ok(bytes) - } - - // split by a bunch of different characters - let mut it = s.split(|c| c == '.' || c == ':' || c == ' '); - let block = it.next() - .expect("First element of split iterator is never empty; qed") - .parse()?; - - let index = it.next() - .ok_or_else(|| format!("Extrinsic index missing: example \"5:0\""))? - .parse() - .map_err(|e| format!("Invalid index format: {}", e))?; - - Ok(Self::Block(block, index)) } } -#[cfg(test)] -mod tests { - use super::*; - use sp_core::hash::H160 as Hash; - - #[test] - fn should_parse_block_strings() { - type BlockAddress = super::BlockAddress; - - let b0 = BlockAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258"); - let b1 = BlockAddress::from_str("1234"); - let b2 = BlockAddress::from_str("0"); - let b3 = BlockAddress::from_str("0x0012345f"); - - - assert_eq!(b0, Ok(BlockAddress::Hash( - "3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap() - ))); - assert_eq!(b1, Ok(BlockAddress::Number(1234))); - assert_eq!(b2, Ok(BlockAddress::Number(0))); - assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); +impl CliConfiguration for InspectCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params } - #[test] - fn should_parse_extrinsic_address() { - type BlockAddress = super::BlockAddress; - type ExtrinsicAddress = super::ExtrinsicAddress; - - let e0 = ExtrinsicAddress::from_str("1234"); - let b0 = ExtrinsicAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258:5"); - let b1 = ExtrinsicAddress::from_str("1234:0"); - let b2 = ExtrinsicAddress::from_str("0 0"); - let b3 = ExtrinsicAddress::from_str("0x0012345f"); - - - assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); - assert_eq!(b0, Ok(ExtrinsicAddress::Block( - BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), - 5 - ))); - assert_eq!(b1, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(1234), - 0 - ))); - assert_eq!(b2, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(0), - 0 - ))); - assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) } } diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index c82682d6021dbbac42f51c0367343d1dec8dcb65..b8101d98a31ce717b4a57f9ee939abe2444253a9 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -27,7 +27,9 @@ pub mod command; use std::{ fmt, - marker::PhantomData + fmt::Debug, + marker::PhantomData, + str::FromStr, }; use codec::{Encode, Decode}; use sc_client_api::BlockBackend; @@ -38,8 +40,6 @@ use sp_runtime::{ traits::{Block, HashFor, NumberFor, Hash} }; -use command::{BlockAddress, ExtrinsicAddress}; - /// A helper type for a generic block input. pub type BlockAddressFor = BlockAddress< as Hash>::Output, @@ -205,3 +205,123 @@ impl> Inspector Ok(format!("{}", ExtrinsicPrinter(ext, &self.printer))) } } + +/// A block to retrieve. +#[derive(Debug, Clone, PartialEq)] +pub enum BlockAddress { + /// Get block by hash. + Hash(Hash), + /// Get block by number. + Number(Number), + /// Raw SCALE-encoded bytes. 
+ Bytes(Vec), +} + +impl FromStr for BlockAddress { + type Err = String; + + fn from_str(s: &str) -> Result { + // try to parse hash first + if let Ok(hash) = s.parse() { + return Ok(Self::Hash(hash)) + } + + // then number + if let Ok(number) = s.parse() { + return Ok(Self::Number(number)) + } + + // then assume it's bytes (hex-encoded) + sp_core::bytes::from_hex(s) + .map(Self::Bytes) + .map_err(|e| format!( + "Given string does not look like hash or number. It could not be parsed as bytes either: {}", + e + )) + } +} + +/// An extrinsic address to decode and print out. +#[derive(Debug, Clone, PartialEq)] +pub enum ExtrinsicAddress { + /// Extrinsic as part of existing block. + Block(BlockAddress, usize), + /// Raw SCALE-encoded extrinsic bytes. + Bytes(Vec), +} + +impl FromStr for ExtrinsicAddress { + type Err = String; + + fn from_str(s: &str) -> Result { + // first try raw bytes + if let Ok(bytes) = sp_core::bytes::from_hex(s).map(Self::Bytes) { + return Ok(bytes) + } + + // split by a bunch of different characters + let mut it = s.split(|c| c == '.' || c == ':' || c == ' '); + let block = it.next() + .expect("First element of split iterator is never empty; qed") + .parse()?; + + let index = it.next() + .ok_or_else(|| format!("Extrinsic index missing: example \"5:0\""))? + .parse() + .map_err(|e| format!("Invalid index format: {}", e))?; + + Ok(Self::Block(block, index)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::hash::H160 as Hash; + + #[test] + fn should_parse_block_strings() { + type BlockAddress = super::BlockAddress; + + let b0 = BlockAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258"); + let b1 = BlockAddress::from_str("1234"); + let b2 = BlockAddress::from_str("0"); + let b3 = BlockAddress::from_str("0x0012345f"); + + + assert_eq!(b0, Ok(BlockAddress::Hash( + "3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap() + ))); + assert_eq!(b1, Ok(BlockAddress::Number(1234))); + assert_eq!(b2, Ok(BlockAddress::Number(0))); + assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); + } + + #[test] + fn should_parse_extrinsic_address() { + type BlockAddress = super::BlockAddress; + type ExtrinsicAddress = super::ExtrinsicAddress; + + let e0 = ExtrinsicAddress::from_str("1234"); + let b0 = ExtrinsicAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258:5"); + let b1 = ExtrinsicAddress::from_str("1234:0"); + let b2 = ExtrinsicAddress::from_str("0 0"); + let b3 = ExtrinsicAddress::from_str("0x0012345f"); + + + assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); + assert_eq!(b0, Ok(ExtrinsicAddress::Block( + BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), + 5 + ))); + assert_eq!(b1, Ok(ExtrinsicAddress::Block( + BlockAddress::Number(1234), + 0 + ))); + assert_eq!(b2, Ok(ExtrinsicAddress::Block( + BlockAddress::Number(0), + 0 + ))); + assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); + } +} diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index cb271b987dba2e99c725258713d633863ff81e8b..81c5009f394d57332d5c9ef65ed8c6bb2cb6f120 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-primitives" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -8,11 +8,11 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-core = { version = 
"2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] -sp-serializer = { version = "2.0.0-alpha.2", path = "../../../primitives/serializer" } +sp-serializer = { version = "2.0.0-alpha.5", path = "../../../primitives/serializer" } pretty_assertions = "0.6.1" [features] @@ -21,3 +21,6 @@ std = [ "sp-core/std", "sp-runtime/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index 8b37aff291341bd52cc11503fc4d37597769411c..df095bc5bb10d2574a01790f3ed32ea9c8585ebe 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-rpc-client" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,7 +11,10 @@ repository = "https://github.com/paritytech/substrate/" env_logger = "0.7.0" futures = "0.1.29" hyper = "0.12.35" -jsonrpc-core-client = { version = "14.0.3", features = ["http", "ws"] } +jsonrpc-core-client = { version = "14.0.5", default-features = false, features = ["http"] } log = "0.4.8" -node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } -sc-rpc = { version = "2.0.0-alpha.2", path = "../../../client/rpc" } +node-primitives = { version = "2.0.0-alpha.5", path = "../primitives" } +sc-rpc = { version = "2.0.0-alpha.5", path = "../../../client/rpc" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index 1155eab304f8ae953207565d13f9434a4d27bb50..f1d230af90dc5e9e65da0ba4284355fa5292435a 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-rpc" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -8,20 +8,23 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } +sc-client = { version = "0.8.0-alpha.5", path = "../../../client/" } jsonrpc-core = "14.0.3" -node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } -node-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -pallet-contracts-rpc = { version = "0.8.0-alpha.2", path = "../../../frame/contracts/rpc/" } -pallet-transaction-payment-rpc = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment/rpc/" } -substrate-frame-rpc-system = { version = "2.0.0-alpha.2", path = "../../../utils/frame/rpc/system" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } -sc-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../client/consensus/babe" } -sc-consensus-babe-rpc = { version = "0.8.0-alpha.2", path = "../../../client/consensus/babe/rpc" } -sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/babe" } -sc-keystore = { version = 
"2.0.0-alpha.2", path = "../../../client/keystore" } -sc-consensus-epochs = { version = "0.8.0-alpha.2", path = "../../../client/consensus/epochs" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +node-primitives = { version = "2.0.0-alpha.5", path = "../primitives" } +node-runtime = { version = "2.0.0-alpha.5", path = "../runtime" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +pallet-contracts-rpc = { version = "0.8.0-alpha.5", path = "../../../frame/contracts/rpc/" } +pallet-transaction-payment-rpc = { version = "2.0.0-alpha.5", path = "../../../frame/transaction-payment/rpc/" } +substrate-frame-rpc-system = { version = "2.0.0-alpha.5", path = "../../../utils/frame/rpc/system" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../primitives/transaction-pool" } +sc-consensus-babe = { version = "0.8.0-alpha.5", path = "../../../client/consensus/babe" } +sc-consensus-babe-rpc = { version = "0.8.0-alpha.5", path = "../../../client/consensus/babe/rpc" } +sp-consensus-babe = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/babe" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../../../client/keystore" } +sc-consensus-epochs = { version = "0.8.0-alpha.5", path = "../../../client/consensus/epochs" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 15672715a415d0c6471e00faaadad3715b2c7789..b8e5f706291459f72e3587179054ec8a9e635c72 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-runtime" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -11,70 +11,72 @@ repository = "https://github.com/paritytech/substrate/" [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } integer-sqrt = { version = "0.1.2" } -rustc-hex = { version = "2.0", optional = true } serde = { version = "1.0.102", optional = true } # primitives -sp-authority-discovery = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8.0-alpha.2", default-features = false, path = "../../../primitives/consensus/babe" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-alpha.2"} -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/inherents" } -node-primitives = { version = "2.0.0-alpha.2", default-features = false, path = "../primitives" } -sp-offchain = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/offchain" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/std" } -sp-api 
= { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/staking" } -sp-keyring = { version = "2.0.0-alpha.2", optional = true, path = "../../../primitives/keyring" } -sp-session = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/session" } -sp-transaction-pool = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/version" } +sp-authority-discovery = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.8.0-alpha.5", default-features = false, path = "../../../primitives/consensus/babe" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-alpha.5"} +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/inherents" } +node-primitives = { version = "2.0.0-alpha.5", default-features = false, path = "../primitives" } +sp-offchain = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/offchain" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/std" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/staking" } +sp-keyring = { version = "2.0.0-alpha.5", optional = true, path = "../../../primitives/keyring" } +sp-session = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/session" } +sp-transaction-pool = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/version" } # frame dependencies -frame-executive = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/executive" } -frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/benchmarking", optional = true } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/system" } -frame-system-rpc-runtime-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-authority-discovery = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/authority-discovery" } -pallet-authorship = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/authorship" } -pallet-babe = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/babe" } -pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/balances" } -pallet-collective = { version = "2.0.0-alpha.2", default-features = false, path = 
"../../../frame/collective" } -pallet-contracts = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0-alpha.2", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } -pallet-democracy = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/democracy" } -pallet-elections-phragmen = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/elections-phragmen" } -pallet-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/finality-tracker" } -pallet-grandpa = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/indices" } -pallet-identity = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/identity" } -pallet-membership = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/membership" } -pallet-offences = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/offences" } -pallet-randomness-collective-flip = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-recovery = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/recovery" } -pallet-session = { version = "2.0.0-alpha.2", features = ["historical"], path = "../../../frame/session", default-features = false } -pallet-staking = { version = "2.0.0-alpha.2", features = ["migrate"], path = "../../../frame/staking", default-features = false } -pallet-staking-reward-curve = { version = "2.0.0-alpha.2", path = "../../../frame/staking/reward-curve" } -pallet-sudo = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/sudo" } -pallet-society = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/society" } -pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/timestamp" } -pallet-treasury = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/treasury" } -pallet-utility = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/utility" } -pallet-transaction-payment = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-vesting = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/vesting" } +frame-executive = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/executive" } +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/system" } +frame-system-rpc-runtime-api = { version = "2.0.0-alpha.5", default-features = false, path = 
"../../../frame/system/rpc/runtime-api/" } +pallet-authority-discovery = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/authority-discovery" } +pallet-authorship = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/authorship" } +pallet-babe = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/babe" } +pallet-balances = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/balances" } +pallet-collective = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/collective" } +pallet-contracts = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/contracts" } +pallet-contracts-primitives = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/contracts/common/" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0-alpha.5", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-democracy = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/democracy" } +pallet-elections-phragmen = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-finality-tracker = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/finality-tracker" } +pallet-grandpa = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/indices" } +pallet-identity = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/identity" } +pallet-membership = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/membership" } +pallet-offences = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/offences" } +pallet-offences-benchmarking = { version = "2.0.0-alpha.5", path = "../../../frame/offences/benchmarking", default-features = false, optional = true } +pallet-randomness-collective-flip = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-recovery = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/recovery" } +pallet-session = { version = "2.0.0-alpha.5", features = ["historical"], path = "../../../frame/session", default-features = false } +pallet-session-benchmarking = { version = "2.0.0-alpha.5", path = "../../../frame/session/benchmarking", default-features = false, optional = true } +pallet-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/staking" } +pallet-staking-reward-curve = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/staking/reward-curve" } +pallet-scheduler = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/scheduler" } +pallet-society = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/society" } +pallet-sudo = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/sudo" } +pallet-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/timestamp" } +pallet-treasury = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/treasury" } +pallet-utility = { version = "2.0.0-alpha.5", default-features = 
false, path = "../../../frame/utility" } +pallet-transaction-payment = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/transaction-payment" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +pallet-vesting = { version = "2.0.0-alpha.5", default-features = false, path = "../../../frame/vesting" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } [dev-dependencies] -sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } +sp-io = { version = "2.0.0-alpha.5", path = "../../../primitives/io" } [features] default = ["std"] @@ -107,7 +109,6 @@ std = [ "sp-core/std", "pallet-randomness-collective-flip/std", "sp-std/std", - "rustc-hex", "serde", "pallet-session/std", "sp-api/std", @@ -134,8 +135,25 @@ std = [ ] runtime-benchmarks = [ "frame-benchmarking", - "pallet-timestamp/runtime-benchmarks", - "pallet-identity/runtime-benchmarks", + "frame-support/runtime-benchmarks", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", "pallet-balances/runtime-benchmarks", + "pallet-collective/runtime-benchmarks", + "pallet-democracy/runtime-benchmarks", + "pallet-elections-phragmen/runtime-benchmarks", + "pallet-identity/runtime-benchmarks", + "pallet-im-online/runtime-benchmarks", + "pallet-society/runtime-benchmarks", + "pallet-staking/runtime-benchmarks", + "pallet-timestamp/runtime-benchmarks", + "pallet-treasury/runtime-benchmarks", + "pallet-utility/runtime-benchmarks", "pallet-vesting/runtime-benchmarks", + "pallet-collective/runtime-benchmarks", + "pallet-offences-benchmarking", + "pallet-session-benchmarking", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 48b9037bf8a7516acf25d902e56871fbb540f1f3..54e236db96e96914a8d4098be0d6f0ad6196364f 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -35,7 +35,7 @@ use sp_runtime::{ impl_opaque_keys, generic, create_runtime_str, }; use sp_runtime::curve::PiecewiseLinear; -use sp_runtime::transaction_validity::TransactionValidity; +use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}; use sp_runtime::traits::{ self, BlakeTwo256, Block as BlockT, StaticLookup, SaturatedConversion, ConvertInto, OpaqueKeys, @@ -73,6 +73,51 @@ use constants::{time::*, currency::*}; #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); +/// A transaction submitter with the given key type. +pub type TransactionSubmitterOf = TransactionSubmitter; + +/// Submits transaction with the node's public and signature type. Adheres to the signed extension +/// format of the chain. +impl frame_system::offchain::CreateTransaction for Runtime { + type Public = ::Signer; + type Signature = Signature; + + fn create_transaction>( + call: Call, + public: Self::Public, + account: AccountId, + index: Index, + ) -> Option<(Call, ::SignaturePayload)> { + // take the biggest period possible. + let period = BlockHashCount::get() + .checked_next_power_of_two() + .map(|c| c / 2) + .unwrap_or(2) as u64; + let current_block = System::block_number() + .saturated_into::() + // The `System::block_number` is initialized with `n+1`, + // so the actual block number is `n`. 
+ .saturating_sub(1); + let tip = 0; + let extra: SignedExtra = ( + frame_system::CheckVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal(period, current_block)), + frame_system::CheckNonce::::from(index), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + Default::default(), + ); + let raw_payload = SignedPayload::new(call, extra).map_err(|e| { + debug::warn!("Unable to create signed payload: {:?}", e); + }).ok()?; + let signature = TSigner::sign(public, &raw_payload)?; + let address = Indices::unlookup(account); + let (call, extra, _) = raw_payload.deconstruct(); + Some((call, (address, signature, extra))) + } +} + /// Runtime version. pub const VERSION: RuntimeVersion = RuntimeVersion { spec_name: create_runtime_str!("node"), @@ -82,8 +127,8 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 237, - impl_version: 0, + spec_version: 240, + impl_version: 1, apis: RUNTIME_API_VERSIONS, }; @@ -161,6 +206,17 @@ impl pallet_utility::Trait for Runtime { type MaxSignatories = MaxSignatories; } +parameter_types! { + pub const MaximumWeight: Weight = 2_000_000; +} + +impl pallet_scheduler::Trait for Runtime { + type Event = Event; + type Origin = Origin; + type Call = Call; + type MaximumWeight = MaximumWeight; +} + parameter_types! { pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; @@ -216,6 +272,7 @@ impl pallet_transaction_payment::Trait for Runtime { parameter_types! { pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } + impl pallet_timestamp::Trait for Runtime { type Moment = Moment; type OnTimestampSet = Babe; @@ -255,6 +312,7 @@ impl pallet_session::Trait for Runtime { type SessionHandler = ::KeyTypeIdProviders; type Keys = SessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = Babe; } impl pallet_session::historical::Trait for Runtime { @@ -278,12 +336,13 @@ parameter_types! { pub const BondingDuration: pallet_staking::EraIndex = 24 * 28; pub const SlashDeferDuration: pallet_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; + pub const ElectionLookahead: BlockNumber = 25; // 10 minutes per session => 100 block. pub const MaxNominatorRewardedPerValidator: u32 = 64; } impl pallet_staking::Trait for Runtime { type Currency = Balances; - type Time = Timestamp; + type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVoteHandler; type RewardRemainder = Treasury; type Event = Event; @@ -296,13 +355,19 @@ impl pallet_staking::Trait for Runtime { type SlashCancelOrigin = pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; type SessionInterface = Self; type RewardCurve = RewardCurve; + type NextNewSession = Session; + type ElectionLookahead = ElectionLookahead; + type Call = Call; + type SubmitTransaction = TransactionSubmitterOf<()>; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type UnsignedPriority = StakingUnsignedPriority; } parameter_types! 
{ pub const LaunchPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; pub const VotingPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; - pub const EmergencyVotingPeriod: BlockNumber = 3 * 24 * 60 * MINUTES; + pub const FastTrackVotingPeriod: BlockNumber = 3 * 24 * 60 * MINUTES; + pub const InstantAllowed: bool = true; pub const MinimumDeposit: Balance = 100 * DOLLARS; pub const EnactmentPeriod: BlockNumber = 30 * 24 * 60 * MINUTES; pub const CooloffPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; @@ -328,7 +393,9 @@ impl pallet_democracy::Trait for Runtime { /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote /// be tabled immediately and with a shorter voting/enactment period. type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; - type EmergencyVotingPeriod = EmergencyVotingPeriod; + type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; + type InstantAllowed = InstantAllowed; + type FastTrackVotingPeriod = FastTrackVotingPeriod; // To cancel a proposal which has been passed, 2/3 of the council must agree to it. type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; // Any single technical committee member may veto a coming council proposal, however they can @@ -337,6 +404,7 @@ impl pallet_democracy::Trait for Runtime { type CooloffPeriod = CooloffPeriod; type PreimageByteDeposit = PreimageByteDeposit; type Slash = Treasury; + type Scheduler = Scheduler; } parameter_types! { @@ -363,6 +431,9 @@ impl pallet_elections_phragmen::Trait for Runtime { type Event = Event; type Currency = Balances; type ChangeMembers = Council; + // NOTE: this implies that council's genesis members cannot be set directly and must come from + // this module. + type InitializeMembers = Council; type CurrencyToVote = CurrencyToVoteHandler; type CandidacyBond = CandidacyBond; type VotingBond = VotingBond; @@ -467,20 +538,21 @@ impl pallet_sudo::Trait for Runtime { type Call = Call; } -/// A runtime transaction submitter. -pub type SubmitTransaction = TransactionSubmitter; - parameter_types! { pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_SLOTS as _; + pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); + /// We prioritize im-online heartbeats over phragmen solution submission. + pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; } impl pallet_im_online::Trait for Runtime { type AuthorityId = ImOnlineId; type Event = Event; type Call = Call; - type SubmitTransaction = SubmitTransaction; + type SubmitTransaction = TransactionSubmitterOf; type SessionDuration = SessionDuration; type ReportUnresponsiveness = Offences; + type UnsignedPriority = ImOnlineUnsignedPriority; } impl pallet_offences::Trait for Runtime { @@ -527,46 +599,6 @@ impl pallet_identity::Trait for Runtime { type RegistrarOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; } -impl frame_system::offchain::CreateTransaction for Runtime { - type Public = ::Signer; - type Signature = Signature; - - fn create_transaction>( - call: Call, - public: Self::Public, - account: AccountId, - index: Index, - ) -> Option<(Call, ::SignaturePayload)> { - // take the biggest period possible. 
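Aside on the mortal-era computation used by the runtime's `CreateTransaction` implementation (added above, with the old in-place copy being removed here): it takes the next power of two at or above `BlockHashCount`, halves it, and falls back to 2 on overflow, which keeps the era within `BlockHashCount` so the anchor block hash is still available when the transaction is validated. A minimal standalone sketch, using 2400 purely as an illustrative `BlockHashCount`:

    // Standalone sketch of the era-length rule; 2400 is an illustrative
    // BlockHashCount, not necessarily what the runtime configures.
    fn longest_mortal_period(block_hash_count: u64) -> u64 {
        block_hash_count
            .checked_next_power_of_two() // 2400 -> 4096
            .map(|c| c / 2)              // 4096 -> 2048
            .unwrap_or(2)                // only reached on overflow
    }

    fn main() {
        assert_eq!(longest_mortal_period(2400), 2048);
    }
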
- let period = BlockHashCount::get() - .checked_next_power_of_two() - .map(|c| c / 2) - .unwrap_or(2) as u64; - let current_block = System::block_number() - .saturated_into::() - // The `System::block_number` is initialized with `n+1`, - // so the actual block number is `n`. - .saturating_sub(1); - let tip = 0; - let extra: SignedExtra = ( - frame_system::CheckVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal(period, current_block)), - frame_system::CheckNonce::::from(index), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - Default::default(), - ); - let raw_payload = SignedPayload::new(call, extra).map_err(|e| { - debug::warn!("Unable to create signed payload: {:?}", e); - }).ok()?; - let signature = TSigner::sign(public, &raw_payload)?; - let address = Indices::unlookup(account); - let (call, extra, _) = raw_payload.deconstruct(); - Some((call, (address, signature, extra))) - } -} - parameter_types! { pub const ConfigDepositBase: Balance = 5 * DOLLARS; pub const FriendDepositFactor: Balance = 50 * CENTS; @@ -635,12 +667,12 @@ construct_runtime!( Indices: pallet_indices::{Module, Call, Storage, Config, Event}, Balances: pallet_balances::{Module, Call, Storage, Config, Event}, TransactionPayment: pallet_transaction_payment::{Module, Storage}, - Staking: pallet_staking::{Module, Call, Config, Storage, Event}, + Staking: pallet_staking::{Module, Call, Config, Storage, Event, ValidateUnsigned}, Session: pallet_session::{Module, Call, Storage, Event, Config}, Democracy: pallet_democracy::{Module, Call, Storage, Config, Event}, Council: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, TechnicalCommittee: pallet_collective::::{Module, Call, Storage, Origin, Event, Config}, - Elections: pallet_elections_phragmen::{Module, Call, Storage, Event}, + Elections: pallet_elections_phragmen::{Module, Call, Storage, Event, Config}, TechnicalMembership: pallet_membership::::{Module, Call, Storage, Event, Config}, FinalityTracker: pallet_finality_tracker::{Module, Call, Inherent}, Grandpa: pallet_grandpa::{Module, Call, Storage, Config, Event}, @@ -655,6 +687,7 @@ construct_runtime!( Society: pallet_society::{Module, Call, Storage, Event, Config}, Recovery: pallet_recovery::{Module, Call, Storage, Event}, Vesting: pallet_vesting::{Module, Call, Storage, Event, Config}, + Scheduler: pallet_scheduler::{Module, Call, Storage, Event}, } ); @@ -731,8 +764,11 @@ impl_runtime_apis! { } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity { - Executive::validate_transaction(tx) + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx) } } @@ -842,48 +878,41 @@ impl_runtime_apis! 
{ #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn dispatch_benchmark( - module: Vec, - extrinsic: Vec, + pallet: Vec, + benchmark: Vec, lowest_range_values: Vec, highest_range_values: Vec, steps: Vec, repeat: u32, - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::Benchmarking; - - let result = match module.as_slice() { - b"pallet-balances" | b"balances" => Balances::run_benchmark( - extrinsic, - lowest_range_values, - highest_range_values, - steps, - repeat, - ), - b"pallet-identity" | b"identity" => Identity::run_benchmark( - extrinsic, - lowest_range_values, - highest_range_values, - steps, - repeat, - ), - b"pallet-timestamp" | b"timestamp" => Timestamp::run_benchmark( - extrinsic, - lowest_range_values, - highest_range_values, - steps, - repeat, - ), - b"pallet-vesting" | b"vesting" => Vesting::run_benchmark( - extrinsic, - lowest_range_values, - highest_range_values, - steps, - repeat, - ), - _ => Err("Benchmark not found for this pallet."), - }; - - result.map_err(|e| e.into()) + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark}; + // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues. + // To get around that, we separated the Session benchmarks into its own crate, which is why + // we need these two lines below. + use pallet_session_benchmarking::Module as SessionBench; + use pallet_offences_benchmarking::Module as OffencesBench; + + impl pallet_session_benchmarking::Trait for Runtime {} + impl pallet_offences_benchmarking::Trait for Runtime {} + + let mut batches = Vec::::new(); + let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat); + + add_benchmark!(params, batches, b"balances", Balances); + add_benchmark!(params, batches, b"collective", Council); + add_benchmark!(params, batches, b"democracy", Democracy); + add_benchmark!(params, batches, b"identity", Identity); + add_benchmark!(params, batches, b"im-online", ImOnline); + add_benchmark!(params, batches, b"session", SessionBench::); + add_benchmark!(params, batches, b"staking", Staking); + add_benchmark!(params, batches, b"timestamp", Timestamp); + add_benchmark!(params, batches, b"treasury", Treasury); + add_benchmark!(params, batches, b"utility", Utility); + add_benchmark!(params, batches, b"vesting", Vesting); + add_benchmark!(params, batches, b"offences", OffencesBench::); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) } } } @@ -912,39 +941,7 @@ mod tests { >, {} - is_submit_signed_transaction::(); - is_sign_and_submit_transaction::(); - } - - #[test] - fn block_hooks_weight_should_not_exceed_limits() { - use frame_support::weights::WeighBlock; - let check_for_block = |b| { - let block_hooks_weight = - >::on_initialize(b) + - >::on_finalize(b); - - assert_eq!( - block_hooks_weight, - 0, - "This test might fail simply because the value being compared to has increased to a \ - module declaring a new weight for a hook or call. In this case update the test and \ - happily move on.", - ); - - // Invariant. Always must be like this to have a sane chain. - assert!(block_hooks_weight < MaximumBlockWeight::get()); - - // Warning. - if block_hooks_weight > MaximumBlockWeight::get() / 2 { - println!( - "block hooks weight is consuming more than a block's capacity. You probably want \ - to re-think this. This test will fail now." 
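Back to the `dispatch_benchmark` rewrite above: every pallet is now wired in through a single `add_benchmark!` line against the shared `params` tuple and `batches` vector, so extending the dispatcher takes one statement rather than a new match arm. Purely as a hypothetical illustration (this change does not ship a `Scheduler` benchmark suite):

    // Hypothetical: registering one more pallet with the same macro; the
    // Scheduler pallet has no benchmarks in this change.
    add_benchmark!(params, batches, b"scheduler", Scheduler);
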
- ); - assert!(false); - } - }; - - let _ = (0..100_000).for_each(check_for_block); + is_submit_signed_transaction::>(); + is_sign_and_submit_transaction::>(); } } diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 97d76467fea4289e4bdea0b6ed7ce8d70d088019..df73e20070d2cedca064206d6b6dfc9b796272f2 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-testing" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] description = "Test utilities for Substrate node." edition = "2018" @@ -10,49 +10,48 @@ repository = "https://github.com/paritytech/substrate/" publish = true [dependencies] -pallet-balances = { version = "2.0.0-alpha.2", path = "../../../frame/balances" } -sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } -sc-client-db = { version = "0.8.0-alpha.2", path = "../../../client/db/", features = ["kvdb-rocksdb"] } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api/" } -codec = { package = "parity-scale-codec", version = "1.2.0" } -pallet-contracts = { version = "2.0.0-alpha.2", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0-alpha.2", path = "../../../frame/grandpa" } -pallet-indices = { version = "2.0.0-alpha.2", path = "../../../frame/indices" } -sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } -node-executor = { version = "2.0.0-alpha.2", path = "../executor" } -node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } -node-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } -frame-support = { version = "2.0.0-alpha.2", path = "../../../frame/support" } -pallet-session = { version = "2.0.0-alpha.2", path = "../../../frame/session" } -pallet-society = { version = "2.0.0-alpha.2", path = "../../../frame/society" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -pallet-staking = { version = "2.0.0-alpha.2", path = "../../../frame/staking" } -sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor", features = ["wasmtime"] } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } -frame-system = { version = "2.0.0-alpha.2", path = "../../../frame/system" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../../../frame/balances" } +sc-client = { version = "0.8.0-alpha.5", path = "../../../client/" } +sc-client-db = { version = "0.8.0-alpha.5", path = "../../../client/db/", features = ["kvdb-rocksdb"] } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../../client/api/" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +pallet-contracts = { version = "2.0.0-alpha.5", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0-alpha.5", path = "../../../frame/grandpa" } +pallet-indices = { version = "2.0.0-alpha.5", path = "../../../frame/indices" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../../primitives/keyring" } +node-executor = { version = "2.0.0-alpha.5", path = "../executor" } +node-primitives = { version = "2.0.0-alpha.5", path = "../primitives" } +node-runtime = { version = "2.0.0-alpha.5", path = "../runtime" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-alpha.5", path = 
"../../../primitives/io" } +frame-support = { version = "2.0.0-alpha.5", path = "../../../frame/support" } +pallet-session = { version = "2.0.0-alpha.5", path = "../../../frame/session" } +pallet-society = { version = "2.0.0-alpha.5", path = "../../../frame/society" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +pallet-staking = { version = "2.0.0-alpha.5", path = "../../../frame/staking" } +sc-executor = { version = "0.8.0-alpha.5", path = "../../../client/executor", features = ["wasmtime"] } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } +frame-system = { version = "2.0.0-alpha.5", path = "../../../frame/system" } substrate-test-client = { version = "2.0.0-dev", path = "../../../test-utils/client" } -pallet-timestamp = { version = "2.0.0-alpha.2", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0-alpha.2", path = "../../../frame/treasury" } +pallet-timestamp = { version = "2.0.0-alpha.5", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-alpha.5", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0-alpha.5", path = "../../../frame/treasury" } wabt = "0.9.2" -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -sp-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/finality-tracker" } -sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/timestamp" } -sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0-alpha.3", path = "../../../client/block-builder" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +sp-finality-tracker = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/finality-tracker" } +sp-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/timestamp" } +sp-block-builder = { version = "2.0.0-alpha.5", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../../../client/block-builder" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../../primitives/inherents" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } log = "0.4.8" tempfile = "3.1.0" fs_extra = "1" [dev-dependencies] criterion = "0.3.0" -sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } -sc-service = { version = "0.8.0-alpha.2", path = "../../../client/service", features = ["rocksdb"] } +sc-cli = { version = "0.8.0-alpha.5", path = "../../../client/cli" } +sc-service = { version = "0.8.0-alpha.5", path = "../../../client/service", features = ["rocksdb"] } -[[bench]] -name = "import" -harness = false +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/node/testing/benches/import.rs b/bin/node/testing/benches/import.rs deleted file mode 100644 index b36d2e1181b0a2483ee35fc50e43c1d2ebf27522..0000000000000000000000000000000000000000 --- a/bin/node/testing/benches/import.rs +++ /dev/null @@ -1,259 +0,0 @@ -// Copyright 2020 Parity Technologies 
(UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Block import benchmark. -//! -//! This benchmark is expected to measure block import operation of -//! some more or less full block. -//! -//! As we also want to protect against cold-cache attacks, this -//! benchmark should not rely on any caching (except those that -//! DO NOT depend on user input). Thus block generation should be -//! based on randomized operation. -//! -//! This is supposed to be very simple benchmark and is not subject -//! to much configuring - just block full of randomized transactions. -//! It is not supposed to measure runtime modules weight correctness - -use std::fmt; -use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes}; -use node_primitives::Block; -use sp_runtime::generic::BlockId; -use criterion::{Criterion, criterion_group, criterion_main}; -use sc_client_api::backend::Backend; - -criterion_group!( - name = benches; - config = Criterion::default().sample_size(50).warm_up_time(std::time::Duration::from_secs(20)); - targets = bench_block_import, bench_account_reaping, bench_account_ed25519 -); -criterion_group!( - name = wasm_size; - config = Criterion::default().sample_size(10); - targets = bench_wasm_size_import -); -criterion_group!( - name = profile; - config = Criterion::default().sample_size(10); - targets = profile_block_import -); -criterion_main!(benches, profile); - -fn bench_block_import(c: &mut Criterion) { - sc_cli::init_logger(""); - // for future uses, uncomment if something wrong. - // sc_cli::init_logger("sc_client=debug"); - - let mut bench_db = BenchDb::new(100); - let block = bench_db.generate_block(BlockType::RandomTransfers(100)); - - log::trace!( - target: "bench-logistics", - "Seed database directory: {}", - bench_db.path().display(), - ); - - c.bench_function_over_inputs("import-block-B-0001", - move |bencher, profile| { - bencher.iter_batched( - || { - let context = bench_db.create_context(*profile); - - // mostly to just launch compiler before benching! 
- let version = context.client.runtime_version_at(&BlockId::Number(0)) - .expect("Failed to get runtime version") - .spec_version; - - log::trace!( - target: "bench-logistics", - "Next iteration database directory: {}, runtime version: {}", - context.path().display(), version, - ); - - context - }, - |mut context| { - let start = std::time::Instant::now(); - context.import_block(block.clone()); - let elapsed = start.elapsed(); - - log::info!( - target: "bench-logistics", - "imported block with {} tx, took: {:#?}", - block.extrinsics.len(), - elapsed, - ); - - log::info!( - target: "bench-logistics", - "usage info: {}", - context.backend.usage_info() - .expect("RocksDB backend always provides usage info!"), - ); - }, - criterion::BatchSize::LargeInput, - ); - }, - vec![Profile::Wasm, Profile::Native], - ); -} - -fn bench_account_reaping(c: &mut Criterion) { - sc_cli::init_logger(""); - - let mut bench_db = BenchDb::new(100); - let block = bench_db.generate_block(BlockType::RandomTransfersReaping(100)); - - c.bench_function_over_inputs("import-block-reaping-B-0002", - move |bencher, profile| { - bencher.iter_batched( - || { - let context = bench_db.create_context(*profile); - - // mostly to just launch compiler before benching! - context.client.runtime_version_at(&BlockId::Number(0)) - .expect("Failed to get runtime version"); - - context - }, - |mut context| { - context.import_block(block.clone()); - }, - criterion::BatchSize::LargeInput, - ); - }, - vec![Profile::Wasm, Profile::Native], - ); -} - -fn bench_account_ed25519(c: &mut Criterion) { - sc_cli::init_logger(""); - - let mut bench_db = BenchDb::with_key_types(100, KeyTypes::Ed25519); - let block = bench_db.generate_block(BlockType::RandomTransfers(100)); - - c.bench_function_over_inputs("import-block-ed25519-B-0003", - move |bencher, profile| { - bencher.iter_batched( - || { - let context = bench_db.create_context(*profile); - context.client.runtime_version_at(&BlockId::Number(0)) - .expect("Failed to get runtime version"); - - context - }, - |mut context| { - context.import_block(block.clone()); - }, - criterion::BatchSize::LargeInput, - ); - }, - vec![Profile::Wasm, Profile::Native], - ); -} - -// This is not an actual benchmark, so don't use it to measure anything. -// It just produces special pattern of cpu load that allows easy picking -// the part of block import for the profiling in the tool of choice. 
-fn profile_block_import(c: &mut Criterion) { - sc_cli::init_logger(""); - - let mut bench_db = BenchDb::new(128); - let block = bench_db.generate_block(BlockType::RandomTransfers(100)); - - c.bench_function("profile block", - move |bencher| { - bencher.iter_batched( - || { - bench_db.create_context(Profile::Native) - }, - |mut context| { - // until better osx signpost/callgrind signal is possible to use - // in rust, we just pause everything completely to help choosing - // actual profiling interval - std::thread::park_timeout(std::time::Duration::from_secs(2)); - context.import_block(block.clone()); - // and here as well - std::thread::park_timeout(std::time::Duration::from_secs(2)); - log::info!( - target: "bench-logistics", - "imported block, usage info: {}", - context.backend.usage_info() - .expect("RocksDB backend always provides usage info!"), - ) - }, - criterion::BatchSize::PerIteration, - ); - }, - ); -} - -struct Setup { - db: BenchDb, - block: Block, -} - -struct SetupIterator { - current: usize, - finish: usize, - multiplier: usize, -} - -impl SetupIterator { - fn new(current: usize, finish: usize, multiplier: usize) -> Self { - SetupIterator { current, finish, multiplier } - } -} - -impl Iterator for SetupIterator { - type Item = Setup; - - fn next(&mut self) -> Option { - if self.current >= self.finish { return None } - - self.current += 1; - - let size = self.current * self.multiplier; - let mut db = BenchDb::new(size); - let block = db.generate_block(BlockType::RandomTransfers(size)); - Some(Setup { db, block }) - } -} - -impl fmt::Debug for Setup { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "Setup: {} tx/block", self.block.extrinsics.len()) - } -} - -fn bench_wasm_size_import(c: &mut Criterion) { - sc_cli::init_logger(""); - - c.bench_function_over_inputs("wasm_size_import", - move |bencher, setup| { - bencher.iter_batched( - || { - setup.db.create_context(Profile::Wasm) - }, - |mut context| { - context.import_block(setup.block.clone()); - }, - criterion::BatchSize::PerIteration, - ); - }, - SetupIterator::new(5, 15, 50), - ); -} diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index d49a6411cfd8f67a92b276c6c7758f1669d5628a..2ca6428bedc3000fd3f75bb0ce573fea2d133718 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -186,7 +186,7 @@ impl BenchDb { pruning: PruningMode::ArchiveAll, source: sc_client_db::DatabaseSettingsSrc::Path { path: dir.into(), - cache_size: None, + cache_size: 512, }, }; @@ -197,6 +197,7 @@ impl BenchDb { None, None, ExecutionExtensions::new(profile.into_execution_strategies(), None), + sp_core::tasks::executor(), None, ).expect("Should not fail"); diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index e35059e0c6ffd300cb2941e4322e22c8bbf832ee..8a57010770f3dedf9795e0da462455943fb6f74d 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -109,6 +109,7 @@ pub fn config_endowed( pallet_collective_Instance1: Some(Default::default()), pallet_collective_Instance2: Some(Default::default()), pallet_membership_Instance1: Some(Default::default()), + pallet_elections_phragmen: Some(Default::default()), pallet_sudo: Some(Default::default()), pallet_treasury: Some(Default::default()), pallet_society: Some(SocietyConfig { diff --git a/bin/node/transaction-factory/Cargo.toml b/bin/node/transaction-factory/Cargo.toml index 44f2d87f9c56dd82cf9abdae08b287546a181321..33ebeb767ae37e75c04309c958e3de79096bee67 100644 --- 
a/bin/node/transaction-factory/Cargo.toml +++ b/bin/node/transaction-factory/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "node-transaction-factory" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -8,16 +8,19 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } -sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } -sc-block-builder = { version = "0.8.0-alpha.2", path = "../../../client/block-builder" } -sc-client = { version = "0.8.0-alpha.2", path = "../../../client" } -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sp-block-builder = { version = "2.0.0-alpha.5", path = "../../../primitives/block-builder" } +sc-cli = { version = "0.8.0-alpha.5", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../../client/api" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../../../client/block-builder" } +sc-client = { version = "0.8.0-alpha.5", path = "../../../client" } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } log = "0.4.8" -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../../../client/service" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sc-service = { version = "0.8.0-alpha.5", default-features = false, path = "../../../client/service" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index 5e51c4358e513d8c6a3ec5164d3aba0520d3f405..bab393a86a49d03e2e0201dfcb6c7f89add1a9f5 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "chain-spec-builder" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -10,8 +10,12 @@ repository = "https://github.com/paritytech/substrate/" [dependencies] ansi_term = "0.12.1" -sc-keystore = { version = "2.0.0-alpha.2", path = "../../../client/keystore" } -node-cli = { version = "2.0.0-alpha.2", path = "../../node/cli" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../../../client/keystore" } +sc-chain-spec = { version = "2.0.0-alpha.5", path = "../../../client/chain-spec" } +node-cli = { version = "2.0.0-alpha.5", path = "../../node/cli" } +sp-core = { version = "2.0.0-alpha.5", 
path = "../../../primitives/core" } rand = "0.7.2" structopt = "0.3.8" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/main.rs index 3673909706cdb765508e4e72230d66c0ffc46635..144018701460e79254db45b81cd1ef2431e2b866 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/main.rs @@ -87,7 +87,7 @@ fn genesis_constructor( let authorities = authority_seeds .iter() .map(AsRef::as_ref) - .map(chain_spec::get_authority_keys_from_seed) + .map(chain_spec::authority_keys_from_seed) .collect::>(); let enable_println = true; @@ -120,6 +120,7 @@ fn generate_chain_spec( let chain_spec = chain_spec::ChainSpec::from_genesis( "Custom", "custom", + sc_chain_spec::ChainType::Live, move || genesis_constructor(&authority_seeds, &endowed_accounts, &sudo_account), vec![], None, @@ -142,7 +143,7 @@ fn generate_authority_keys_and_store( ).map_err(|err| err.to_string())?; let (_, _, grandpa, babe, im_online, authority_discovery) = - chain_spec::get_authority_keys_from_seed(seed); + chain_spec::authority_keys_from_seed(seed); let insert_key = |key_type, public| { keystore.write().insert_unknown( diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 252978a3a98cbc143139b9cdfba1169a1ebc6de9..672f25275c74fbfbab2c1f0908cfbefef8afed42 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "subkey" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,29 +9,31 @@ repository = "https://github.com/paritytech/substrate/" [dependencies] futures = "0.1.29" -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -node-runtime = { version = "2.0.0-alpha.2", path = "../../node/runtime" } -node-primitives = { version = "2.0.0-alpha.2", path = "../../node/primitives" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +node-runtime = { version = "2.0.0-alpha.5", path = "../../node/runtime" } +node-primitives = { version = "2.0.0-alpha.5", path = "../../node/primitives" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } rand = "0.7.2" clap = "2.33.0" tiny-bip39 = "0.7" -rustc-hex = "2.0.1" -substrate-bip39 = "0.3.1" +substrate-bip39 = "0.4.1" hex = "0.4.0" hex-literal = "0.2.1" -codec = { package = "parity-scale-codec", version = "1.2.0" } -frame-system = { version = "2.0.0-alpha.2", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +frame-system = { version = "2.0.0-alpha.5", path = "../../../frame/system" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../../../frame/balances" } +pallet-transaction-payment = { version = "2.0.0-alpha.5", path = "../../../frame/transaction-payment" } rpassword = "4.0.1" itertools = "0.8.2" derive_more = { version = "0.99.2" } -sc-rpc = { version = "2.0.0-alpha.2", path = "../../../client/rpc" } +sc-rpc = { version = "2.0.0-alpha.5", path = "../../../client/rpc" } jsonrpc-core-client = { version = "14.0.3", features = ["http"] } hyper = "0.12.35" -libp2p = "0.16.2" +libp2p = "0.17.0" serde_json = "1.0" 
[features] bench = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/bin/utils/subkey/src/main.rs b/bin/utils/subkey/src/main.rs index 33209692caffb87284318bc59962ab56bcb6cbf7..08a46f1190ac28011314a18fee2b34b7211f5efd 100644 --- a/bin/utils/subkey/src/main.rs +++ b/bin/utils/subkey/src/main.rs @@ -79,13 +79,15 @@ trait Crypto: Sized { ) where ::Public: PublicT, { + let v = network_override.unwrap_or_default(); if let Ok((pair, seed)) = Self::Pair::from_phrase(uri, password) { let public_key = Self::public_from_pair(&pair); - + match output { OutputType::Json => { let json = json!({ "secretPhrase": uri, + "networkId": String::from(v), "secretSeed": format_seed::(seed), "publicKey": format_public_key::(public_key.clone()), "accountId": format_account_id::(public_key), @@ -95,11 +97,13 @@ trait Crypto: Sized { }, OutputType::Text => { println!("Secret phrase `{}` is account:\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", + Network ID/version: {}\n \ + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", uri, + String::from(v), format_seed::(seed), format_public_key::(public_key.clone()), format_account_id::(public_key), @@ -114,6 +118,7 @@ trait Crypto: Sized { OutputType::Json => { let json = json!({ "secretKeyUri": uri, + "networkId": String::from(v), "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, "publicKey": format_public_key::(public_key.clone()), "accountId": format_account_id::(public_key), @@ -123,11 +128,13 @@ trait Crypto: Sized { }, OutputType::Text => { println!("Secret Key URI `{}` is account:\n \ - Secret seed: {}\n \ - Public key (hex): {}\n \ - Account ID: {}\n \ - SS58 Address: {}", + Network ID/version: {}\n \ + Secret seed: {}\n \ + Public key (hex): {}\n \ + Account ID: {}\n \ + SS58 Address: {}", uri, + String::from(v), if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, format_public_key::(public_key.clone()), format_account_id::(public_key), @@ -135,7 +142,6 @@ trait Crypto: Sized { ); }, } - } else if let Ok((public_key, v)) = ::Public::from_string_with_version(uri) { @@ -167,7 +173,7 @@ trait Crypto: Sized { }, } } else { - println!("Invalid phrase/URI given"); + eprintln!("Invalid phrase/URI given"); } } } @@ -649,7 +655,7 @@ fn read_pair( } fn format_signature(signature: &SignatureOf) -> String { - format!("{}", hex::encode(signature)) + format!("{}", HexDisplay::from(&signature.as_ref())) } fn format_seed(seed: SeedOf) -> String { @@ -712,7 +718,7 @@ fn create_extrinsic( } fn print_extrinsic(extrinsic: UncheckedExtrinsic) { - println!("0x{}", hex::encode(&extrinsic.encode())); + println!("0x{}", HexDisplay::from(&extrinsic.encode())); } fn print_usage(matches: &ArgMatches) { diff --git a/bin/utils/subkey/src/rpc.rs b/bin/utils/subkey/src/rpc.rs index 7b3cde595867bd46e49ad840178e407500dfdabc..e08ccc19a22b8de89088ffade2809f7418eb7d25 100644 --- a/bin/utils/subkey/src/rpc.rs +++ b/bin/utils/subkey/src/rpc.rs @@ -42,7 +42,7 @@ impl RpcClient { client.insert_key(key_type, suri, public).map(|_| ()) }) .map_err(|e| { - println!("Error inserting key: {:?}", e); + eprintln!("Error inserting key: {:?}", e); }) ); } diff --git a/bin/utils/subkey/src/vanity.rs b/bin/utils/subkey/src/vanity.rs index ff8703a9905ced71da4a6b4a6795f53fad6613d9..f921470946ec04732654091922711808f8aa6ad7 100644 --- a/bin/utils/subkey/src/vanity.rs +++ b/bin/utils/subkey/src/vanity.rs @@ -62,6 +62,12 @@ fn 
calculate_score(_desired: &str, key: &str) -> usize { 0 } +/// Validate whether the char is allowed to be used in base58. +/// num 0, lower l, upper I and O are not allowed. +fn validate_base58(c :char) -> bool { + c.is_alphanumeric() && !"0lIO".contains(c) +} + pub(super) fn generate_key(desired: &str) -> Result, &'static str> where PublicOf: PublicT, { @@ -69,7 +75,12 @@ pub(super) fn generate_key(desired: &str) -> Result, &'sta return Err("Pattern must not be empty"); } - println!("Generating key containing pattern '{}'", desired); + if !desired.chars().all(validate_base58) { + return Err("Pattern can only contains valid characters in base58 \ + (all alphanumeric except for 0, l, I and O)"); + } + + eprintln!("Generating key containing pattern '{}'", desired); let top = 45 + (desired.len() * 48); let mut best = 0; @@ -94,14 +105,14 @@ pub(super) fn generate_key(desired: &str) -> Result, &'sta score: score, }; if best >= top { - println!("best: {} == top: {}", best, top); + eprintln!("best: {} == top: {}", best, top); return Ok(keypair); } } done += 1; if done % good_waypoint(done) == 0 { - println!("{} keys searched; best is {}/{} complete", done, best, top); + eprintln!("{} keys searched; best is {}/{} complete", done, best, top); } } } @@ -162,6 +173,22 @@ mod tests { ); } + #[test] + fn test_invalid_pattern() { + assert!(generate_key::("").is_err()); + assert!(generate_key::("0").is_err()); + assert!(generate_key::("l").is_err()); + assert!(generate_key::("I").is_err()); + assert!(generate_key::("O").is_err()); + assert!(generate_key::("!").is_err()); + } + + #[test] + fn test_valid_pattern() { + assert!(generate_key::("o").is_ok()); + assert!(generate_key::("L").is_ok()); + } + #[cfg(feature = "bench")] #[bench] fn bench_paranoiac(b: &mut Bencher) { diff --git a/client/Cargo.toml b/client/Cargo.toml index 61199f04da970bbefb4f494e2d46a5824538d499..5d4b5927111ea220c33256c50c8250fe198ec840 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,37 +9,41 @@ repository = "https://github.com/paritytech/substrate/" description = "Substrate Client and associated logic." 
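Looping back to the vanity-key change in `subkey` above: the new pattern check only admits characters of the base58 alphabet, i.e. alphanumerics minus the look-alikes 0, O, I and l, and the generator's progress messages now go to stderr so stdout stays machine-readable. A minimal standalone sketch mirroring that rule and the new tests:

    // Mirrors the rule above: alphanumeric, minus the base58 look-alikes.
    fn validate_base58(c: char) -> bool {
        c.is_alphanumeric() && !"0lIO".contains(c)
    }

    fn main() {
        assert!("oL".chars().all(validate_base58));     // accepted, as in the new tests
        assert!(!"0lIO!".chars().any(validate_base58)); // all rejected
    }
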
[dependencies] -sc-block-builder = { version = "0.8.0-alpha.2", path = "block-builder" } -sc-client-api = { version = "2.0.0-alpha.2", path = "api" } -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } -sp-consensus = { version = "0.8.0-alpha.2", path = "../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "block-builder" } +sc-client-api = { version = "2.0.0-alpha.5", path = "api" } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +sp-consensus = { version = "0.8.0-alpha.5", path = "../primitives/consensus/common" } derive_more = { version = "0.99.2" } -sc-executor = { version = "0.8.0-alpha.2", path = "executor" } -sp-externalities = { version = "0.8.0-alpha.2", path = "../primitives/externalities" } +sc-executor = { version = "0.8.0-alpha.5", path = "executor" } +sp-externalities = { version = "0.8.0-alpha.5", path = "../primitives/externalities" } fnv = { version = "1.0.6" } futures = { version = "0.3.1", features = ["compat"] } hash-db = { version = "0.15.2" } hex-literal = { version = "0.2.1" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../primitives/inherents" } -sp-keyring = { version = "2.0.0-alpha.2", path = "../primitives/keyring" } -kvdb = "0.4.0" +sp-inherents = { version = "2.0.0-alpha.5", path = "../primitives/inherents" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../primitives/keyring" } +kvdb = "0.5.0" log = { version = "0.4.8" } parking_lot = "0.10.0" -sp-core = { version = "2.0.0-alpha.2", path = "../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", path = "../primitives/std" } -sp-version = { version = "2.0.0-alpha.2", path = "../primitives/version" } -sp-api = { version = "2.0.0-alpha.2", path = "../primitives/api" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../primitives/runtime" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../primitives/blockchain" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../primitives/state-machine" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "telemetry" } -sp-trie = { version = "2.0.0-alpha.2", path = "../primitives/trie" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-alpha.2", path = "../utils/prometheus" } +sp-core = { version = "2.0.0-alpha.5", path = "../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", path = "../primitives/std" } +sp-version = { version = "2.0.0-alpha.5", path = "../primitives/version" } +sp-api = { version = "2.0.0-alpha.5", path = "../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../primitives/runtime" } +sp-utils = { version = "2.0.0-alpha.5", path = "../primitives/utils" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../primitives/blockchain" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../primitives/state-machine" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "telemetry" } +sp-trie = { version = "2.0.0-alpha.5", path = "../primitives/trie" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-alpha.5", path = "../utils/prometheus" } tracing = "0.1.10" [dev-dependencies] env_logger = "0.7.0" tempfile = "3.1.0" substrate-test-runtime-client = { version = "2.0.0-dev", path = "../test-utils/runtime/client" } -kvdb-memorydb = "0.4.0" -sp-panic-handler = { version = "2.0.0-alpha.2", path = "../primitives/panic-handler" } +kvdb-memorydb = "0.5.0" +sp-panic-handler = { version = "2.0.0-alpha.5", path = 
"../primitives/panic-handler" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 7ceb12eaf60bb780bca061539852bc1a42c8819f..dbe6afe9c7cae00c99abca8464dd4acd4c4f8a3c 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-api" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,31 +11,36 @@ documentation = "https://docs.rs/sc-client-api" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/common" } derive_more = { version = "0.99.2" } -sc-executor = { version = "0.8.0-alpha.2", path = "../executor" } -sp-externalities = { version = "0.8.0-alpha.2", path = "../../primitives/externalities" } +sc-executor = { version = "0.8.0-alpha.5", path = "../executor" } +sp-externalities = { version = "0.8.0-alpha.5", path = "../../primitives/externalities" } fnv = { version = "1.0.6" } futures = { version = "0.3.1" } hash-db = { version = "0.15.2", default-features = false } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } hex-literal = { version = "0.2.1" } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } -kvdb = "0.4.0" +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../primitives/keyring" } +kvdb = "0.5.0" log = { version = "0.4.8" } parking_lot = "0.10.0" -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/version" } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } -sp-trie = { version = "2.0.0-alpha.2", path = "../../primitives/trie" } -sp-storage = { version = "2.0.0-alpha.2", path = "../../primitives/storage" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } +lazy_static = "1.4.0" +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-version = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/version" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = 
"../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../telemetry" } +sp-trie = { version = "2.0.0-alpha.5", path = "../../primitives/trie" } +sp-storage = { version = "2.0.0-alpha.5", path = "../../primitives/storage" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../primitives/transaction-pool" } [dev-dependencies] sp-test-primitives = { version = "2.0.0-dev", path = "../../primitives/test-primitives" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/api/src/client.rs b/client/api/src/client.rs index 06d49da640da6158dd373dd4ab3072c8639e9534..c855cd3a0832883c3a66b5c986871909aece048a 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -17,7 +17,6 @@ //! A set of APIs supported by the client along with their primitives. use std::{fmt, collections::HashSet}; -use futures::channel::mpsc; use sp_core::storage::StorageKey; use sp_runtime::{ traits::{Block as BlockT, NumberFor}, @@ -28,13 +27,14 @@ use sp_consensus::BlockOrigin; use crate::blockchain::Info; use crate::notifications::StorageEventStream; +use sp_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain; /// Type that implements `futures::Stream` of block import events. -pub type ImportNotifications = mpsc::UnboundedReceiver>; +pub type ImportNotifications = TracingUnboundedReceiver>; /// A stream of block finality notifications. -pub type FinalityNotifications = mpsc::UnboundedReceiver>; +pub type FinalityNotifications = TracingUnboundedReceiver>; /// Expected hashes of blocks at given heights. /// @@ -179,8 +179,12 @@ pub struct IoInfo { pub state_reads: u64, /// State reads (keys) from cache. pub state_reads_cache: u64, - /// State reads (keys) from cache. + /// State reads (keys) pub state_writes: u64, + /// State write (keys) already cached. + pub state_writes_cache: u64, + /// State write (trie nodes) to backend db. + pub state_writes_nodes: u64, } /// Usage statistics for running client instance. @@ -202,7 +206,7 @@ impl fmt::Display for UsageInfo { f, "caches: ({} state, {} db overlay), \ state db: ({} non-canonical, {} pruning, {} pinned), \ - i/o: ({} tx, {} write, {} read, {} avg tx, {}/{} key cache reads/total, {} key writes)", + i/o: ({} tx, {} write, {} read, {} avg tx, {}/{} key cache reads/total, {} trie nodes writes)", self.memory.state_cache, self.memory.database_cache, self.memory.state_db.non_canonical, @@ -214,7 +218,7 @@ impl fmt::Display for UsageInfo { self.io.average_transaction_size, self.io.state_reads_cache, self.io.state_reads, - self.io.state_writes, + self.io.state_writes_nodes, ) } } diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 66f51e75c793782123757be13a24c23ede1f96fa..e4080323c188eb5ded7f85ab14e229e9beae117a 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -34,7 +34,7 @@ pub use light::*; pub use notifications::*; pub use proof_provider::*; -pub use sp_state_machine::{StorageProof, ExecutionStrategy}; +pub use sp_state_machine::{StorageProof, ExecutionStrategy, CloneableSpawn}; /// Utility methods for the client. 
pub mod utils { diff --git a/client/api/src/notifications.rs b/client/api/src/notifications.rs index 72a9f357fce337c7dec32758ff5aa2d0a8b6bd6a..f154eade44d5e37cd76a3bad97e21d46e3ed779d 100644 --- a/client/api/src/notifications.rs +++ b/client/api/src/notifications.rs @@ -22,9 +22,9 @@ use std::{ }; use fnv::{FnvHashSet, FnvHashMap}; -use futures::channel::mpsc; use sp_core::storage::{StorageKey, StorageData}; use sp_runtime::traits::Block as BlockT; +use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; /// Storage change set #[derive(Debug)] @@ -67,7 +67,7 @@ impl StorageChangeSet { } /// Type that implements `futures::Stream` of storage change events. -pub type StorageEventStream = mpsc::UnboundedReceiver<(H, StorageChangeSet)>; +pub type StorageEventStream = TracingUnboundedReceiver<(H, StorageChangeSet)>; type SubscriberId = u64; @@ -82,7 +82,7 @@ pub struct StorageNotifications { FnvHashSet )>, sinks: FnvHashMap, + TracingUnboundedSender<(Block::Hash, StorageChangeSet)>, Option>, Option>>>, )>, @@ -299,7 +299,7 @@ impl StorageNotifications { // insert sink - let (tx, rx) = mpsc::unbounded(); + let (tx, rx) = tracing_unbounded("mpsc_storage_notification_items"); self.sinks.insert(current_id, (tx, keys, child_keys)); rx } diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index a7ea523552d8f7ac1a51c903d23b06fd73dd9181..7521101ae6f2a2f0e1df77bb25df381c45e3bf66 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-authority-discovery" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" @@ -14,27 +14,30 @@ prost-build = "0.6.1" [dependencies] bytes = "0.5.0" -codec = { package = "parity-scale-codec", default-features = false, version = "1.2.0" } +codec = { package = "parity-scale-codec", default-features = false, version = "1.3.0" } derive_more = "0.99.2" -futures = "0.3.1" +futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.16.2", default-features = false, features = ["secp256k1", "libp2p-websocket"] } +libp2p = { version = "0.17.0", default-features = false, features = ["secp256k1", "libp2p-websocket"] } log = "0.4.8" -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-alpha.2" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-alpha.5"} prost = "0.6.1" rand = "0.7.2" -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } -sc-network = { version = "0.8.0-alpha.2", path = "../network" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../keystore" } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } serde_json = "1.0.41" -sp-authority-discovery = { version = "2.0.0-alpha.2", path = "../../primitives/authority-discovery" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +sp-authority-discovery = { version = "2.0.0-alpha.5", path = "../../primitives/authority-discovery" } +sp-blockchain = { version = 
"2.0.0-alpha.5", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } [dev-dependencies] env_logger = "0.7.0" quickcheck = "0.9.0" -sc-peerset = { version = "2.0.0-alpha.2", path = "../peerset" } +sc-peerset = { version = "2.0.0-alpha.5", path = "../peerset" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client"} + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index 74b7043c29ba0c3fb8439c6dde47c619183831ed..751e3e76e921f874556190a2bea1e15546fb1f09 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -16,6 +16,8 @@ //! Authority discovery errors. +use sp_core::crypto::CryptoTypePublicPair; + /// AuthorityDiscovery Result. pub type Result = std::result::Result; @@ -38,14 +40,21 @@ pub enum Error { MatchingHashedAuthorityIdWithAuthorityId, /// Failed to set the authority discovery peerset priority group in the peerset module. SettingPeersetPriorityGroup(String), + /// The sender side of the dht event stream has been closed likely due to the network + /// terminating. + DhtEventStreamTerminated, /// Failed to encode a protobuf payload. EncodingProto(prost::EncodeError), /// Failed to decode a protobuf payload. DecodingProto(prost::DecodeError), - /// Failed to encode or decode scale payload + /// Failed to encode or decode scale payload. EncodingDecodingScale(codec::Error), /// Failed to parse a libp2p multi address. ParsingMultiaddress(libp2p::core::multiaddr::Error), + /// Failed to sign using a specific public key. + MissingSignature(CryptoTypePublicPair), + /// Failed to sign using all public keys. + Signing, /// Failed to register Prometheus metric. 
Prometheus(prometheus_endpoint::PrometheusError), } diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 171a401bbbf8b5d00bb80d06ee0dbe2ad7d482ce..1a8a5c9f40f699756073179f037271fac974dd41 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -56,14 +56,13 @@ use futures_timer::Delay; use codec::{Decode, Encode}; use error::{Error, Result}; -use libp2p::Multiaddr; use log::{debug, error, log_enabled, warn}; use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; use prost::Message; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{DhtEvent, ExHashT, NetworkStateInfo}; +use sc_network::{Multiaddr, config::MultiaddrWithPeerId, DhtEvent, ExHashT, NetworkStateInfo}; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; -use sp_core::crypto::{key_types, Pair}; +use sp_core::crypto::{key_types, CryptoTypePublicPair, Pair}; use sp_core::traits::BareCryptoStorePtr; use sp_runtime::{traits::Block as BlockT, generic::BlockId}; use sp_api::ProvideRuntimeApi; @@ -187,7 +186,7 @@ where pub fn new( client: Arc, network: Arc, - sentry_nodes: Vec, + sentry_nodes: Vec, key_store: BareCryptoStorePtr, dht_event_rx: Pin + Send>>, prometheus_registry: Option, @@ -210,18 +209,7 @@ where ); let sentry_nodes = if !sentry_nodes.is_empty() { - let addrs = sentry_nodes.into_iter().filter_map(|a| match a.parse() { - Ok(addr) => Some(addr), - Err(e) => { - error!( - target: "sub-authority-discovery", - "Failed to parse sentry node public address '{:?}', continuing anyways.", e, - ); - None - } - }).collect::>(); - - Some(addrs) + Some(sentry_nodes.into_iter().map(|ma| ma.concat()).collect::>()) } else { None }; @@ -283,19 +271,36 @@ where .encode(&mut serialized_addresses) .map_err(Error::EncodingProto)?; - for key in self.get_priv_keys_within_authority_set()?.into_iter() { - let signature = key.sign(&serialized_addresses); + let keys: Vec = self.get_own_public_keys_within_authority_set()? + .into_iter() + .map(Into::into) + .collect(); + let signatures = self.key_store + .read() + .sign_with_all( + key_types::AUTHORITY_DISCOVERY, + keys.clone(), + serialized_addresses.as_slice(), + ) + .map_err(|_| Error::Signing)?; + + for (sign_result, key) in signatures.iter().zip(keys) { let mut signed_addresses = vec![]; + + // sign_with_all returns Result signature + // is generated for a public key that is supported. + // Verify that all signatures exist for all provided keys. 
+ let signature = sign_result.as_ref().map_err(|_| Error::MissingSignature(key.clone()))?; schema::SignedAuthorityAddresses { addresses: serialized_addresses.clone(), - signature: signature.encode(), + signature: Encode::encode(&signature), } .encode(&mut signed_addresses) .map_err(Error::EncodingProto)?; self.network.put_value( - hash_authority_id(key.public().as_ref())?, + hash_authority_id(key.1.as_ref()), signed_addresses, ); } @@ -318,16 +323,16 @@ where for authority_id in authorities.iter() { self.network - .get_value(&hash_authority_id(authority_id.as_ref())?); + .get_value(&hash_authority_id(authority_id.as_ref())); } Ok(()) } fn handle_dht_events(&mut self, cx: &mut Context) -> Result<()> { - while let Poll::Ready(Some(event)) = self.dht_event_rx.poll_next_unpin(cx) { - match event { - DhtEvent::ValueFound(v) => { + loop { + match self.dht_event_rx.poll_next_unpin(cx) { + Poll::Ready(Some(DhtEvent::ValueFound(v))) => { if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_found"]).inc(); } @@ -342,7 +347,7 @@ where self.handle_dht_value_found_event(v)?; } - DhtEvent::ValueNotFound(hash) => { + Poll::Ready(Some(DhtEvent::ValueNotFound(hash))) => { if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_not_found"]).inc(); } @@ -352,7 +357,7 @@ where "Value for hash '{:?}' not found on Dht.", hash ) }, - DhtEvent::ValuePut(hash) => { + Poll::Ready(Some(DhtEvent::ValuePut(hash))) => { if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put"]).inc(); } @@ -362,7 +367,7 @@ where "Successfully put hash '{:?}' on Dht.", hash, ) }, - DhtEvent::ValuePutFailed(hash) => { + Poll::Ready(Some(DhtEvent::ValuePutFailed(hash))) => { if let Some(metrics) = &self.metrics { metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc(); } @@ -372,10 +377,12 @@ where "Failed to put hash '{:?}' on Dht.", hash ) }, + // The sender side of the dht event stream has been closed, likely due to the + // network terminating. + Poll::Ready(None) => return Err(Error::DhtEventStreamTerminated), + Poll::Pending => return Ok(()), } } - - Ok(()) } fn handle_dht_value_found_event( @@ -403,8 +410,8 @@ where self.addr_cache.retain_ids(&authorities); authorities .into_iter() - .map(|id| hash_authority_id(id.as_ref()).map(|h| (h, id))) - .collect::>>()? + .map(|id| (hash_authority_id(id.as_ref()), id)) + .collect::>() }; // Check if the event origins from an authority in the current authority set. @@ -446,21 +453,6 @@ where Ok(()) } - /// Retrieve all local authority discovery private keys that are within the current authority - /// set. - fn get_priv_keys_within_authority_set(&mut self) -> Result> { - let keys = self.get_own_public_keys_within_authority_set()? - .into_iter() - .map(std::convert::Into::into) - .filter_map(|pub_key| { - self.key_store.read().sr25519_key_pair(key_types::AUTHORITY_DISCOVERY, &pub_key) - }) - .map(std::convert::Into::into) - .collect(); - - Ok(keys) - } - /// Retrieve our public keys within the current authority set. // // A node might have multiple authority discovery keys within its keystore, e.g. an old one and @@ -493,7 +485,6 @@ where } /// Update the peer set 'authority' priority group. - // fn update_peer_set_priority_group(&self) -> Result<()> { let addresses = self.addr_cache.get_subset(); @@ -549,11 +540,18 @@ where match inner() { Ok(()) => {} + + // Handle fatal errors. 
+ // + // Given that the network likely terminated authority discovery should do the same. + Err(Error::DhtEventStreamTerminated) => return Poll::Ready(()), + + // Handle non-fatal errors. Err(e) => error!(target: "sub-authority-discovery", "Poll failure: {:?}", e), }; - // Make sure to always return NotReady as this is a long running task with the same lifetime - // as the node itself. + // Return Poll::Pending as this is a long running task with the same lifetime as the node + // itself. Poll::Pending } } @@ -596,10 +594,8 @@ where } } -fn hash_authority_id(id: &[u8]) -> Result { - libp2p::multihash::encode(libp2p::multihash::Hash::SHA2256, id) - .map(|k| libp2p::kad::record::Key::new(&k)) - .map_err(Error::HashingAuthorityId) +fn hash_authority_id(id: &[u8]) -> libp2p::kad::record::Key { + libp2p::kad::record::Key::new(&libp2p::multihash::Sha2_256::digest(id)) } fn interval_at(start: Instant, duration: Duration) -> Interval { diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 1296c2b627a1eab7c966744ae79d3bf2557e1456..923b7ee0f22db6c59e325a3f83b39a702a58f293 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -19,10 +19,11 @@ use std::{iter::FromIterator, sync::{Arc, Mutex}}; use futures::channel::mpsc::channel; use futures::executor::block_on; use futures::future::poll_fn; +use futures::poll; use libp2p::{kad, PeerId}; -use sp_api::{ApiExt, ApiErrorExt, Core, RuntimeVersion, StorageProof, ProvideRuntimeApi, ApiRef}; -use sp_core::{testing::KeyStore, ExecutionContext, NativeOrEncoded}; +use sp_api::{ProvideRuntimeApi, ApiRef}; +use sp_core::testing::KeyStore; use sp_runtime::traits::{Zero, Block as BlockT, NumberFor}; use substrate_test_runtime_client::runtime::Block; @@ -99,8 +100,7 @@ impl ProvideRuntimeApi for TestApi { fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { RuntimeApi { authorities: self.authorities.clone(), - } - .into() + }.into() } } @@ -120,6 +120,7 @@ impl HeaderBackend for TestApi { finalized_hash: Default::default(), finalized_number: Zero::zero(), genesis_hash: Default::default(), + number_leaves: Default::default(), } } @@ -149,90 +150,13 @@ struct RuntimeApi { authorities: Vec, } -impl Core for RuntimeApi { - fn Core_version_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option<()>, - _: Vec, - ) -> std::result::Result, sp_blockchain::Error> { - unimplemented!("Not required for testing!") - } - - fn Core_execute_block_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option, - _: Vec, - ) -> std::result::Result, sp_blockchain::Error> { - unimplemented!("Not required for testing!") - } - - fn Core_initialize_block_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option<&::Header>, - _: Vec, - ) -> std::result::Result, sp_blockchain::Error> { - unimplemented!("Not required for testing!") - } -} - -impl ApiErrorExt for RuntimeApi { - type Error = sp_blockchain::Error; -} - -impl ApiExt for RuntimeApi { - type StateBackend = < - substrate_test_runtime_client::Backend as sc_client_api::backend::Backend - >::State; - - fn map_api_result std::result::Result, R, E>( - &self, - _: F - ) -> std::result::Result { - unimplemented!("Not required for testing!") - } - - fn runtime_version_at( - &self, - _: &BlockId, - ) -> std::result::Result { - unimplemented!("Not required for testing!") - } - - fn record_proof(&mut self) { - unimplemented!("Not required for testing!") - } - - fn extract_proof(&mut self) -> 
Option { - unimplemented!("Not required for testing!") - } - - fn into_storage_changes( - &self, - _: &Self::StateBackend, - _: Option<&sp_api::ChangesTrieState, sp_api::NumberFor>>, - _: ::Hash, - ) -> std::result::Result, String> - where Self: Sized - { - unimplemented!("Not required for testing!") - } -} +sp_api::mock_impl_runtime_apis! { + impl AuthorityDiscoveryApi for RuntimeApi { + type Error = sp_blockchain::Error; -impl AuthorityDiscoveryApi for RuntimeApi { - fn AuthorityDiscoveryApi_authorities_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option<()>, - _: Vec, - ) -> std::result::Result>, sp_blockchain::Error> { - return Ok(NativeOrEncoded::Native(self.authorities.clone())); + fn authorities(&self) -> Vec { + self.authorities.clone() + } } } @@ -381,7 +305,7 @@ fn handle_dht_events_with_value_found_should_call_set_priority_group() { // Create sample dht event. - let authority_id_1 = hash_authority_id(key_pair.public().as_ref()).unwrap(); + let authority_id_1 = hash_authority_id(key_pair.public().as_ref()); let address_1: Multiaddr = "/ip6/2001:db8::".parse().unwrap(); let mut serialized_addresses = vec![]; @@ -423,3 +347,36 @@ fn handle_dht_events_with_value_found_should_call_set_priority_group() { let _ = block_on(poll_fn(f)); } + +#[test] +fn terminate_when_event_stream_terminates() { + let (dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + let test_api = Arc::new(TestApi { + authorities: vec![], + }); + + let mut authority_discovery = AuthorityDiscovery::new( + test_api, + network.clone(), + vec![], + key_store, + dht_event_rx.boxed(), + None, + ); + + block_on(async { + assert_eq!(Poll::Pending, poll!(&mut authority_discovery)); + + // Simulate termination of the network through dropping the sender side of the dht event + // channel. + drop(dht_event_tx); + + assert_eq!( + Poll::Ready(()), poll!(&mut authority_discovery), + "Expect the authority discovery module to terminate once the sending side of the dht \ + event channel is terminated.", + ); + }); +} diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 7503221f6556072b8b45d69b5b1f420397638a9c..040370ac491cf09932ef1e7467ebeb96847b5af8 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-basic-authorship" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,21 +10,25 @@ description = "Basic implementation of block-authoring logic." 
[dependencies] log = "0.4.8" -futures = "0.3.1" -codec = { package = "parity-scale-codec", version = "1.2.0" } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../../primitives/inherents" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } -sc-block-builder = { version = "0.8.0-alpha.2", path = "../block-builder" } +futures = "0.3.4" +codec = { package = "parity-scale-codec", version = "1.3.0" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../primitives/inherents" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../telemetry" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../primitives/transaction-pool" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../block-builder" } tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } +futures-timer = "3.0.1" [dev-dependencies] -sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../client/transaction-pool" } +sc-transaction-pool = { version = "2.0.0-alpha.5", path = "../../client/transaction-pool" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } parking_lot = "0.10.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index fc9a5dec0668dce922529277ba0ccb13eed0ad73..37bb34a4b67d993fb02bc6b1e5229db0c7fa7f81 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -23,7 +23,7 @@ use sc_client_api::backend; use codec::Decode; use sp_consensus::{evaluation, Proposal, RecordProof}; use sp_inherents::InherentData; -use log::{error, info, debug, trace}; +use log::{error, info, debug, trace, warn}; use sp_core::ExecutionContext; use sp_runtime::{ generic::BlockId, @@ -33,8 +33,8 @@ use sp_transaction_pool::{TransactionPool, InPoolTransaction}; use sc_telemetry::{telemetry, CONSENSUS_INFO}; use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sp_api::{ProvideRuntimeApi, ApiExt}; -use futures::prelude::*; -use sp_blockchain::{HeaderBackend, ApplyExtrinsicFailed}; +use futures::{executor, future, future::Either}; +use sp_blockchain::{HeaderBackend, ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed}; use std::marker::PhantomData; /// Proposer factory. 
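Editor's note (not part of the patch): the basic-authorship hunks that follow replace the direct `self.transaction_pool.ready()` call with a bounded wait on `ready_at`, racing it against a `futures_timer::Delay` via `future::select` and falling back to whatever is ready when the timer wins. A minimal, self-contained sketch of that select/Either pattern, assuming only the `futures` and `futures-timer` crates and using a hypothetical `ready_items` future in place of the pool call:

```
use std::time::Duration;
use futures::{executor, future, future::Either};

// Hypothetical stand-in for `transaction_pool.ready_at(..)`: any future that
// eventually resolves to the items we want to drain.
async fn ready_items() -> Vec<u32> {
    vec![1, 2, 3]
}

// Race the "ready" future against a timer; when the timer wins, fall back to
// a default instead of blocking block production indefinitely.
fn ready_or_timeout(timeout: Duration) -> Vec<u32> {
    match executor::block_on(future::select(
        Box::pin(ready_items()),
        futures_timer::Delay::new(timeout),
    )) {
        Either::Left((items, _delay)) => items,
        Either::Right(_) => {
            eprintln!("Timeout fired waiting for ready items; proceeding anyway.");
            Vec::new()
        }
    }
}

fn main() {
    assert_eq!(ready_or_timeout(Duration::from_millis(100)), vec![1, 2, 3]);
}
```

The same shape appears in `propose_with` below, where the timeout is an eighth of the time remaining until the block-production deadline.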
@@ -76,7 +76,7 @@ impl ProposerFactory let id = BlockId::hash(parent_hash); - info!("Starting consensus session on top of parent {:?}", parent_hash); + info!("🙌 Starting consensus session on top of parent {:?}", parent_hash); let proposer = Proposer { inner: Arc::new(ProposerInner { @@ -196,21 +196,43 @@ impl ProposerInner // We don't check the API versions any further here since the dispatch compatibility // check should be enough. - for extrinsic in self.client.runtime_api() + for inherent in self.client.runtime_api() .inherent_extrinsics_with_context( &self.parent_id, ExecutionContext::BlockConstruction, inherent_data )? { - block_builder.push(extrinsic)?; + match block_builder.push(inherent) { + Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => + warn!("⚠️ Dropping non-mandatory inherent from overweight block."), + Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => { + error!("❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."); + Err(ApplyExtrinsicFailed(Validity(e)))? + } + Err(e) => { + warn!("❗️ Inherent extrinsic returned unexpected error: {}. Dropping.", e); + } + Ok(_) => {} + } } // proceed with transactions let mut is_first = true; let mut skipped = 0; let mut unqueue_invalid = Vec::new(); - let pending_iterator = self.transaction_pool.ready(); + let pending_iterator = match executor::block_on(future::select( + self.transaction_pool.ready_at(self.parent_number), + futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8), + )) { + Either::Left((iterator, _)) => iterator, + Either::Right(_) => { + log::warn!( + "Timeout fired waiting for transaction pool to be ready. Proceeding to block production anyway.", + ); + self.transaction_pool.ready() + } + }; debug!("Attempting to push transactions from the pool."); debug!("Pool status: {:?}", self.transaction_pool.status()); @@ -230,7 +252,7 @@ impl ProposerInner Ok(()) => { debug!("[{:?}] Pushed to the block.", pending_tx_hash); } - Err(sp_blockchain::Error::ApplyExtrinsicFailed(ApplyExtrinsicFailed::Validity(e))) + Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { if is_first { debug!("[{:?}] Invalid transaction: FullBlock on empty block", pending_tx_hash); @@ -266,7 +288,7 @@ impl ProposerInner let (block, storage_changes, proof) = block_builder.build()?.into_inner(); - info!("Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", + info!("🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", block.header().number(), ::Hash::from(block.header().hash()), block.header().parent_hash(), @@ -304,10 +326,14 @@ mod tests { prelude::*, runtime::{Extrinsic, Transfer}, }; + use sp_transaction_pool::{ChainEvent, MaintainedTransactionPool, TransactionSource}; use sc_transaction_pool::{BasicPool, FullChainApi}; use sp_api::Core; use backend::Backend; use sp_blockchain::HeaderBackend; + use sp_runtime::traits::NumberFor; + + const SOURCE: TransactionSource = TransactionSource::External; fn extrinsic(nonce: u64) -> Extrinsic { Transfer { @@ -318,6 +344,17 @@ mod tests { }.into_signed_tx() } + fn chain_event(block_number: u64, header: B::Header) -> ChainEvent + where NumberFor: From + { + ChainEvent::NewBlock { + id: BlockId::Number(block_number.into()), + retracted: vec![], + is_new_best: true, + header, + } + } + #[test] fn should_cease_building_block_when_deadline_is_reached() { // given @@ -327,19 +364,30 @@ mod tests { ); futures::executor::block_on( - 
txpool.submit_at(&BlockId::number(0), vec![extrinsic(0), extrinsic(1)]) + txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)]) ).unwrap(); + futures::executor::block_on( + txpool.maintain(chain_event( + 0, + client.header(&BlockId::Number(0u64)).expect("header get error").expect("there should be header") + )) + ); + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); - let cell = Mutex::new(time::Instant::now()); + let cell = Mutex::new((false, time::Instant::now())); let mut proposer = proposer_factory.init_with_now( &client.header(&BlockId::number(0)).unwrap().unwrap(), Box::new(move || { let mut value = cell.lock(); - let old = *value; + if !value.0 { + value.0 = true; + return value.1; + } + let old = value.1; let new = old + time::Duration::from_secs(2); - *value = new; + *value = (true, new); old }) ); @@ -356,6 +404,36 @@ mod tests { assert_eq!(txpool.ready().count(), 2); } + #[test] + fn should_not_panic_when_deadline_is_reached() { + let client = Arc::new(substrate_test_runtime_client::new()); + let txpool = Arc::new( + BasicPool::new(Default::default(), Arc::new(FullChainApi::new(client.clone()))).0 + ); + + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); + + let cell = Mutex::new((false, time::Instant::now())); + let mut proposer = proposer_factory.init_with_now( + &client.header(&BlockId::number(0)).unwrap().unwrap(), + Box::new(move || { + let mut value = cell.lock(); + if !value.0 { + value.0 = true; + return value.1; + } + let new = value.1 + time::Duration::from_secs(160); + *value = (true, new); + new + }) + ); + + let deadline = time::Duration::from_secs(1); + futures::executor::block_on( + proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) + ).map(|r| r.block).unwrap(); + } + #[test] fn proposed_storage_changes_should_match_execute_block_storage_changes() { let (client, backend) = substrate_test_runtime_client::TestClientBuilder::new() @@ -368,9 +446,16 @@ mod tests { let block_id = BlockId::Hash(genesis_hash); futures::executor::block_on( - txpool.submit_at(&BlockId::number(0), vec![extrinsic(0)]), + txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)]), ).unwrap(); + futures::executor::block_on( + txpool.maintain(chain_event( + 0, + client.header(&BlockId::Number(0u64)).expect("header get error").expect("there should be header") + )) + ); + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); let mut proposer = proposer_factory.init_with_now( @@ -412,7 +497,7 @@ mod tests { ); futures::executor::block_on( - txpool.submit_at(&BlockId::number(0), vec![ + txpool.submit_at(&BlockId::number(0), SOURCE, vec![ extrinsic(0), extrinsic(1), Transfer { @@ -459,15 +544,26 @@ mod tests { block }; + futures::executor::block_on( + txpool.maintain(chain_event( + 0, + client.header(&BlockId::Number(0u64)).expect("header get error").expect("there should be header") + )) + ); + // let's create one block and import it let block = propose_block(&client, 0, 2, 7); client.import(BlockOrigin::Own, block).unwrap(); - // now let's make sure that we can still make some progress + futures::executor::block_on( + txpool.maintain(chain_event( + 1, + client.header(&BlockId::Number(1)).expect("header get error").expect("there should be header") + )) + ); - // This is most likely incorrect, and caused by #5139 - let tx_remaining = 0; - let block = propose_block(&client, 1, 2, tx_remaining); + // now let's make sure that we can still make some 
progress + let block = propose_block(&client, 1, 2, 5); client.import(BlockOrigin::Own, block).unwrap(); } } diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index dd4ebcb07f889fa962d27ebf776572b21ae98e62..5f9ef7c75e8591baaada2b2ff1a46fbebc72f231 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-block-builder" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,16 +10,19 @@ description = "Substrate block builder" [dependencies] -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-block-builder = { version = "2.0.0-alpha.2", path = "../../primitives/block-builder" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-block-builder = { version = "2.0.0-alpha.5", path = "../../primitives/block-builder" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } [dev-dependencies] substrate-test-runtime-client = { path = "../../test-utils/runtime/client" } -sp-trie = { version = "2.0.0-alpha.2", path = "../../primitives/trie" } +sp-trie = { version = "2.0.0-alpha.5", path = "../../primitives/trie" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 10d4fc2622480c70dbbd938e3210b1421c3e0f84..cf30ad606320e56981fd52255296f11ea607dffe 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,11 +9,15 @@ repository = "https://github.com/paritytech/substrate/" description = "Substrate chain configurations." 
[dependencies] -sc-chain-spec-derive = { version = "2.0.0-alpha.2", path = "./derive" } +sc-chain-spec-derive = { version = "2.0.0-alpha.5", path = "./derive" } impl-trait-for-tuples = "0.1.3" -sc-network = { version = "0.8.0-alpha.2", path = "../network" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-chain-spec = { version = "2.0.0-alpha.5", path = "../../primitives/chain-spec" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../telemetry" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index df0e2f92b2e94a2bfbf47d5fd63214925919c14d..9343c9a6de1c95919abe695b4918bd576117efbf 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-chain-spec-derive" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -18,3 +18,6 @@ quote = "1.0.3" syn = "1.0.7" [dev-dependencies] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 008af6f7e0f0f73e2b209ba237d745d5606c946e..9f3a10ee89769914eb7b3779d80de1d506fa5293 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -25,9 +25,8 @@ use serde::{Serialize, Deserialize}; use sp_core::storage::{StorageKey, StorageData, ChildInfo, Storage, StorageChild}; use sp_runtime::BuildStorage; use serde_json as json; -use crate::RuntimeGenesis; -use crate::extension::GetExtension; -use sc_network::Multiaddr; +use crate::{RuntimeGenesis, ChainType, extension::GetExtension, Properties}; +use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; enum GenesisSource { @@ -137,7 +136,9 @@ enum Genesis { struct ClientSpec { name: String, id: String, - boot_nodes: Vec, + #[serde(default)] + chain_type: ChainType, + boot_nodes: Vec, telemetry_endpoints: Option, protocol_id: Option, properties: Option, @@ -149,9 +150,6 @@ struct ClientSpec { genesis: serde::de::IgnoredAny, } -/// Arbitrary properties defined in chain spec as a JSON object -pub type Properties = json::map::Map; - /// A type denoting empty extensions. /// /// We use `Option` here since `()` is not flattenable by serde. @@ -174,7 +172,7 @@ impl Clone for ChainSpec { impl ChainSpec { /// A list of bootnode addresses. - pub fn boot_nodes(&self) -> &[String] { + pub fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { &self.client_spec.boot_nodes } @@ -206,8 +204,8 @@ impl ChainSpec { } /// Add a bootnode to the list. - pub fn add_boot_node(&mut self, addr: Multiaddr) { - self.client_spec.boot_nodes.push(addr.to_string()) + pub fn add_boot_node(&mut self, addr: MultiaddrWithPeerId) { + self.client_spec.boot_nodes.push(addr) } /// Returns a reference to defined chain spec extensions. 
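Editor's note (not part of the patch): from this change on, boot nodes travel through the chain spec as structured `MultiaddrWithPeerId` values rather than raw strings, and every spec reports a `ChainType`. A rough sketch of consuming code under those assumptions; the helper name and the randomly generated peer id are illustrative only:

```
use sc_chain_spec::{ChainSpec, ChainType};
use sc_network::config::{build_multiaddr, MultiaddrWithPeerId};
use sc_network::PeerId;

// Register a loopback boot node on a spec; malformed addresses are now caught
// when the value is constructed rather than when the network starts.
fn add_example_boot_node(spec: &mut dyn ChainSpec) {
    let addr = MultiaddrWithPeerId {
        multiaddr: build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16)],
        peer_id: PeerId::random(),
    };
    spec.add_boot_node(addr);

    // Specs also expose their chain type (e.g. development vs. live); older
    // JSON specs without the field fall back to the serde default.
    let _chain_type: ChainType = spec.chain_type();
}
```

The `build-spec` command further down uses the same struct-literal construction when it injects the default local boot node.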
@@ -219,8 +217,9 @@ impl ChainSpec { pub fn from_genesis G + 'static + Send + Sync>( name: &str, id: &str, + chain_type: ChainType, constructor: F, - boot_nodes: Vec, + boot_nodes: Vec, telemetry_endpoints: Option, protocol_id: Option<&str>, properties: Option, @@ -229,6 +228,7 @@ impl ChainSpec { let client_spec = ClientSpec { name: name.to_owned(), id: id.to_owned(), + chain_type, boot_nodes, telemetry_endpoints, protocol_id: protocol_id.map(str::to_owned), @@ -243,6 +243,11 @@ impl ChainSpec { genesis: GenesisSource::Factory(Arc::new(constructor)), } } + + /// Type of the chain. + fn chain_type(&self) -> ChainType { + self.client_spec.chain_type.clone() + } } impl ChainSpec { @@ -318,9 +323,9 @@ impl ChainSpec { impl crate::ChainSpec for ChainSpec where G: RuntimeGenesis, - E: GetExtension + serde::Serialize + Clone, + E: GetExtension + serde::Serialize + Clone + Send, { - fn boot_nodes(&self) -> &[String] { + fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { ChainSpec::boot_nodes(self) } @@ -332,6 +337,10 @@ where ChainSpec::id(self) } + fn chain_type(&self) -> ChainType { + ChainSpec::chain_type(self) + } + fn telemetry_endpoints(&self) -> &Option { ChainSpec::telemetry_endpoints(self) } @@ -344,7 +353,7 @@ where ChainSpec::properties(self) } - fn add_boot_node(&mut self, addr: Multiaddr) { + fn add_boot_node(&mut self, addr: MultiaddrWithPeerId) { ChainSpec::add_boot_node(self, addr) } @@ -392,6 +401,7 @@ mod tests { ).unwrap(); assert_eq!(spec1.as_json(false), spec2.as_json(false)); + assert_eq!(spec2.chain_type(), ChainType::Live) } #[derive(Debug, Serialize, Deserialize)] diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index 509524fd0aeb7e46e691c1a2afdde6ad1279ea42..de83e170e039b807f7e283b14f613ec90de3e4fc 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -107,31 +107,33 @@ //! pub type MyChainSpec = GenericChainSpec; //! ``` - mod chain_spec; mod extension; -pub use chain_spec::{ChainSpec as GenericChainSpec, Properties, NoExtension}; +pub use chain_spec::{ChainSpec as GenericChainSpec, NoExtension}; pub use extension::{Group, Fork, Forks, Extension, GetExtension, get_extension}; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; +pub use sp_chain_spec::{Properties, ChainType}; use serde::{Serialize, de::DeserializeOwned}; use sp_runtime::BuildStorage; -use sc_network::Multiaddr; +use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; /// A set of traits for the runtime genesis config. pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} impl RuntimeGenesis for T {} -/// Common interface to `GenericChainSpec` -pub trait ChainSpec: BuildStorage { +/// Common interface of a chain specification. +pub trait ChainSpec: BuildStorage + Send { /// Spec name. fn name(&self) -> &str; /// Spec id. fn id(&self) -> &str; + /// Type of the chain. + fn chain_type(&self) -> ChainType; /// A list of bootnode addresses. - fn boot_nodes(&self) -> &[String]; + fn boot_nodes(&self) -> &[MultiaddrWithPeerId]; /// Telemetry endpoints (if any) fn telemetry_endpoints(&self) -> &Option; /// Network protocol id. @@ -143,7 +145,7 @@ pub trait ChainSpec: BuildStorage { /// Returns a reference to defined chain spec extensions. fn extensions(&self) -> &dyn GetExtension; /// Add a bootnode to the list. - fn add_boot_node(&mut self, addr: Multiaddr); + fn add_boot_node(&mut self, addr: MultiaddrWithPeerId); /// Return spec as JSON. 
fn as_json(&self, raw: bool) -> Result; /// Return StorageBuilder for this spec. diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index 908cc4c10eab642e76b1085f08b5ba57415443e7..89247cf016d100792a6ca49e3882529c165472c4 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-cli" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Substrate CLI interface." edition = "2018" @@ -20,30 +20,34 @@ ansi_term = "0.12.1" lazy_static = "1.4.0" app_dirs = "1.2.1" tokio = { version = "0.2.9", features = [ "signal", "rt-core", "rt-threaded" ] } -futures = "0.3.1" -fdlimit = "0.1.1" +futures = "0.3.4" +fdlimit = "0.1.4" serde_json = "1.0.41" -sc-informant = { version = "0.8.0-alpha.2", path = "../informant" } -sp-panic-handler = { version = "2.0.0-alpha.2", path = "../../primitives/panic-handler" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0-alpha.2", path = "../network" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../service" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } -substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0-alpha.2"} -sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +sc-informant = { version = "0.8.0-alpha.5", path = "../informant" } +sp-panic-handler = { version = "2.0.0-alpha.5", path = "../../primitives/panic-handler" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sc-service = { version = "0.8.0-alpha.5", default-features = false, path = "../service" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../telemetry" } +substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0-alpha.5"} +sp-keyring = { version = "2.0.0-alpha.5", path = "../../primitives/keyring" } names = "0.11.0" structopt = "0.3.8" -sc-tracing = { version = "2.0.0-alpha.2", path = "../tracing" } +sc-tracing = { version = "2.0.0-alpha.5", path = "../tracing" } chrono = "0.4.10" -parity-util-mem = { version = "0.5.2", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.6.0", default-features = false, features = ["primitive-types"] } [target.'cfg(not(target_os = "unknown"))'.dependencies] rpassword = "4.0.1" +[target.'cfg(target_family = "unix")'.dependencies] +nix = "0.17.0" + [dev-dependencies] tempfile = "3.1.0" @@ -51,3 +55,6 @@ tempfile = "3.1.0" wasmtime = [ "sc-service/wasmtime", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index 
384087bec0dbe4538c7227e36a6dbce7d789586b..f09a8d8d47392b019675235c42f0bab1a8def14e 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -45,7 +45,9 @@ impl WasmExecutionMethod { impl Into for WasmExecutionMethod { fn into(self) -> sc_service::config::WasmExecutionMethod { match self { - WasmExecutionMethod::Interpreted => sc_service::config::WasmExecutionMethod::Interpreted, + WasmExecutionMethod::Interpreted => { + sc_service::config::WasmExecutionMethod::Interpreted + } #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled, #[cfg(not(feature = "wasmtime"))] diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs index 59f7fbb4e95a5363fd03de5f696241c374ee6737..a01101fa7965563795eb3058bf3be0f752801517 100644 --- a/client/cli/src/commands/build_spec_cmd.rs +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -14,15 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use structopt::StructOpt; -use log::info; -use sc_network::config::build_multiaddr; -use sc_service::{Configuration, ChainSpec}; - use crate::error; -use crate::VersionInfo; -use crate::params::SharedParams; use crate::params::NodeKeyParams; +use crate::params::SharedParams; +use crate::CliConfiguration; +use log::info; +use sc_network::config::build_multiaddr; +use sc_service::{config::MultiaddrWithPeerId, Configuration}; +use structopt::StructOpt; /// The `build-spec` command used to build a specification. #[derive(Debug, StructOpt, Clone)] @@ -49,22 +48,18 @@ pub struct BuildSpecCmd { impl BuildSpecCmd { /// Run the build-spec command - pub fn run( - self, - config: Configuration, - ) -> error::Result<()> { + pub fn run(&self, config: Configuration) -> error::Result<()> { info!("Building chain spec"); - let mut spec = config.chain_spec.expect("`chain_spec` is set to `Some` in `update_config`"); + let mut spec = config.chain_spec; let raw_output = self.raw; if spec.boot_nodes().is_empty() && !self.disable_default_bootnode { let keys = config.network.node_key.into_keypair()?; let peer_id = keys.public().into_peer_id(); - let addr = build_multiaddr![ - Ip4([127, 0, 0, 1]), - Tcp(30333u16), - P2p(peer_id) - ]; + let addr = MultiaddrWithPeerId { + multiaddr: build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16)], + peer_id, + }; spec.add_boot_node(addr) } @@ -74,25 +69,14 @@ impl BuildSpecCmd { Ok(()) } +} - /// Update and prepare a `Configuration` with command line parameters - pub fn update_config( - &self, - mut config: &mut Configuration, - spec_factory: F, - version: &VersionInfo, - ) -> error::Result<()> where - F: FnOnce(&str) -> Result, String>, - { - self.shared_params.update_config(&mut config, spec_factory, version)?; - - let net_config_path = config - .in_chain_config_dir(crate::commands::DEFAULT_NETWORK_CONFIG_PATH) - .expect("We provided a base_path"); - - self.node_key_params.update_config(&mut config, Some(&net_config_path))?; +impl CliConfiguration for BuildSpecCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - Ok(()) + fn node_key_params(&self) -> Option<&NodeKeyParams> { + Some(&self.node_key_params) } } - diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index 88248c596925e1e0c43b1d6d3f13b2fc9262ddcd..ac4fe63da950618f25948c9d2aeabe8c96f3e291 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ 
-14,20 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use crate::error; +use crate::params::ImportParams; +use crate::params::SharedParams; +use crate::CliConfiguration; +use sc_service::{Configuration, ServiceBuilderCommand}; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::fmt::Debug; use std::str::FromStr; use structopt::StructOpt; -use sc_service::{ - Configuration, ServiceBuilderCommand, Roles, ChainSpec, -}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use sp_runtime::generic::BlockId; - -use crate::error; -use crate::VersionInfo; -use crate::runtime::run_until_exit; -use crate::params::SharedParams; -use crate::params::ImportParams; /// The `check-block` command used to validate blocks. #[derive(Debug, StructOpt, Clone)] @@ -53,8 +49,8 @@ pub struct CheckBlockCmd { impl CheckBlockCmd { /// Run the check-block command - pub fn run( - self, + pub async fn run( + &self, config: Configuration, builder: B, ) -> error::Result<()> @@ -65,37 +61,37 @@ impl CheckBlockCmd { <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, ::Hash: std::str::FromStr, { - let input = if self.input.starts_with("0x") { &self.input[2..] } else { &self.input[..] }; + let input = if self.input.starts_with("0x") { + &self.input[2..] + } else { + &self.input[..] + }; let block_id = match FromStr::from_str(input) { Ok(hash) => BlockId::hash(hash), Err(_) => match self.input.parse::() { Ok(n) => BlockId::number((n as u32).into()), - Err(_) => return Err(error::Error::Input("Invalid hash or number specified".into())), - } + Err(_) => { + return Err(error::Error::Input( + "Invalid hash or number specified".into(), + )) + } + }, }; let start = std::time::Instant::now(); - run_until_exit(config, |config| { - Ok(builder(config)?.check_block(block_id)) - })?; + builder(config)?.check_block(block_id).await?; println!("Completed in {} ms.", start.elapsed().as_millis()); Ok(()) } +} - /// Update and prepare a `Configuration` with command line parameters - pub fn update_config( - &self, - mut config: &mut Configuration, - spec_factory: F, - version: &VersionInfo, - ) -> error::Result<()> where - F: FnOnce(&str) -> Result, String>, - { - self.shared_params.update_config(&mut config, spec_factory, version)?; - self.import_params.update_config(&mut config, Roles::FULL, self.shared_params.dev)?; - config.use_in_memory_keystore()?; +impl CliConfiguration for CheckBlockCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - Ok(()) + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) } } diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 61a63806d21b83b9c6a9d5e8a17c111913fd4c96..48abd409d6833ee758965cae3ef11ec867266c0c 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -14,22 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use std::io; -use std::fs; -use std::path::PathBuf; -use std::fmt::Debug; +use crate::error; +use crate::params::{BlockNumber, PruningParams, SharedParams}; +use crate::CliConfiguration; use log::info; -use structopt::StructOpt; use sc_service::{ - Configuration, ServiceBuilderCommand, ChainSpec, - config::DatabaseConfig, Roles, + config::DatabaseConfig, Configuration, ServiceBuilderCommand, }; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; - -use crate::error; -use crate::VersionInfo; -use crate::runtime::run_until_exit; -use crate::params::{SharedParams, BlockNumber, PruningParams}; +use std::fmt::Debug; +use std::fs; +use std::io; +use std::path::PathBuf; +use structopt::StructOpt; /// The `export-blocks` command used to export blocks. #[derive(Debug, StructOpt, Clone)] @@ -65,8 +62,8 @@ pub struct ExportBlocksCmd { impl ExportBlocksCmd { /// Run the export-blocks command - pub fn run( - self, + pub async fn run( + &self, config: Configuration, builder: B, ) -> error::Result<()> @@ -77,9 +74,10 @@ impl ExportBlocksCmd { <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, ::Hash: std::str::FromStr, { - if let DatabaseConfig::Path { ref path, .. } = config.expect_database() { + if let DatabaseConfig::Path { ref path, .. } = &config.database { info!("DB path: {}", path.display()); } + let from = self.from.as_ref().and_then(|f| f.parse().ok()).unwrap_or(1); let to = self.to.as_ref().and_then(|t| t.parse().ok()); @@ -90,24 +88,19 @@ impl ExportBlocksCmd { None => Box::new(io::stdout()), }; - run_until_exit(config, |config| { - Ok(builder(config)?.export_blocks(file, from.into(), to, binary)) - }) + builder(config)? + .export_blocks(file, from.into(), to, binary) + .await + .map_err(Into::into) } +} - /// Update and prepare a `Configuration` with command line parameters - pub fn update_config( - &self, - mut config: &mut Configuration, - spec_factory: F, - version: &VersionInfo, - ) -> error::Result<()> where - F: FnOnce(&str) -> Result, String>, - { - self.shared_params.update_config(&mut config, spec_factory, version)?; - self.pruning_params.update_config(&mut config, Roles::FULL, true)?; - config.use_in_memory_keystore()?; +impl CliConfiguration for ExportBlocksCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - Ok(()) + fn pruning_params(&self) -> Option<&PruningParams> { + Some(&self.pruning_params) } } diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index b43407add1c7f3c0b36235539fd4272c36772dfa..ce95640f469ceb3f380f07727f4ed5bc09d431ae 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -14,21 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
+use crate::error; +use crate::params::ImportParams; +use crate::params::SharedParams; +use crate::CliConfiguration; +use sc_service::{Configuration, ServiceBuilderCommand}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::fmt::Debug; -use std::io::{Read, Seek, self}; use std::fs; +use std::io::{self, Read, Seek}; use std::path::PathBuf; use structopt::StructOpt; -use sc_service::{ - Configuration, ServiceBuilderCommand, ChainSpec, Roles, -}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; - -use crate::error; -use crate::VersionInfo; -use crate::runtime::run_until_exit; -use crate::params::SharedParams; -use crate::params::ImportParams; /// The `import-blocks` command used to import blocks. #[derive(Debug, StructOpt, Clone)] @@ -59,8 +55,8 @@ impl ReadPlusSeek for T {} impl ImportBlocksCmd { /// Run the import-blocks command - pub fn run( - self, + pub async fn run( + &self, config: Configuration, builder: B, ) -> error::Result<()> @@ -77,27 +73,22 @@ impl ImportBlocksCmd { let mut buffer = Vec::new(); io::stdin().read_to_end(&mut buffer)?; Box::new(io::Cursor::new(buffer)) - }, + } }; - run_until_exit(config, |config| { - Ok(builder(config)?.import_blocks(file, false)) - }) + builder(config)? + .import_blocks(file, false) + .await + .map_err(Into::into) } +} - /// Update and prepare a `Configuration` with command line parameters - pub fn update_config( - &self, - mut config: &mut Configuration, - spec_factory: F, - version: &VersionInfo, - ) -> error::Result<()> where - F: FnOnce(&str) -> Result, String>, - { - self.shared_params.update_config(&mut config, spec_factory, version)?; - self.import_params.update_config(&mut config, Roles::FULL, self.shared_params.dev)?; - config.use_in_memory_keystore()?; +impl CliConfiguration for ImportBlocksCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - Ok(()) + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) } } diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index 50e34856f12d61a833d2c7f75742ddaa65f733c3..d05a5464b215da887bd53ec229b9379c6c0ceecc 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -14,34 +14,23 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-mod runcmd; -mod export_blocks_cmd; mod build_spec_cmd; -mod import_blocks_cmd; mod check_block_cmd; -mod revert_cmd; +mod export_blocks_cmd; +mod import_blocks_cmd; mod purge_chain_cmd; +mod revert_cmd; +mod runcmd; -use std::fmt::Debug; -use structopt::StructOpt; - -use sc_service::{ Configuration, ServiceBuilderCommand, ChainSpec }; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; - -use crate::error; -use crate::VersionInfo; -use crate::params::SharedParams; - -pub use crate::commands::runcmd::RunCmd; -pub use crate::commands::export_blocks_cmd::ExportBlocksCmd; pub use crate::commands::build_spec_cmd::BuildSpecCmd; -pub use crate::commands::import_blocks_cmd::ImportBlocksCmd; pub use crate::commands::check_block_cmd::CheckBlockCmd; -pub use crate::commands::revert_cmd::RevertCmd; +pub use crate::commands::export_blocks_cmd::ExportBlocksCmd; +pub use crate::commands::import_blocks_cmd::ImportBlocksCmd; pub use crate::commands::purge_chain_cmd::PurgeChainCmd; - -/// default sub directory to store network config -const DEFAULT_NETWORK_CONFIG_PATH : &'static str = "network"; +pub use crate::commands::revert_cmd::RevertCmd; +pub use crate::commands::runcmd::RunCmd; +use std::fmt::Debug; +use structopt::StructOpt; /// All core commands that are provided by default. /// @@ -50,90 +39,339 @@ const DEFAULT_NETWORK_CONFIG_PATH : &'static str = "network"; /// `Run` are exported as main executable parameters. #[derive(Debug, Clone, StructOpt)] pub enum Subcommand { - /// Build a spec.json file, outputing to stdout. - BuildSpec(build_spec_cmd::BuildSpecCmd), + /// Build a spec.json file, outputs to stdout. + BuildSpec(BuildSpecCmd), /// Export blocks to a file. - ExportBlocks(export_blocks_cmd::ExportBlocksCmd), + ExportBlocks(ExportBlocksCmd), /// Import blocks from file. - ImportBlocks(import_blocks_cmd::ImportBlocksCmd), + ImportBlocks(ImportBlocksCmd), /// Validate a single block. - CheckBlock(check_block_cmd::CheckBlockCmd), + CheckBlock(CheckBlockCmd), /// Revert chain to the previous state. - Revert(revert_cmd::RevertCmd), + Revert(RevertCmd), /// Remove the whole chain data. - PurgeChain(purge_chain_cmd::PurgeChainCmd), + PurgeChain(PurgeChainCmd), } -impl Subcommand { - /// Get the shared parameters of a `CoreParams` command - pub fn get_shared_params(&self) -> &SharedParams { - use Subcommand::*; - - match self { - BuildSpec(params) => ¶ms.shared_params, - ExportBlocks(params) => ¶ms.shared_params, - ImportBlocks(params) => ¶ms.shared_params, - CheckBlock(params) => ¶ms.shared_params, - Revert(params) => ¶ms.shared_params, - PurgeChain(params) => ¶ms.shared_params, - } - } +// TODO: move to config.rs? 
+/// Macro that helps implement CliConfiguration on an enum of subcommand automatically +/// +/// # Example +/// +/// ``` +/// # #[macro_use] extern crate sc_cli; +/// +/// # struct EmptyVariant {} +/// +/// # impl sc_cli::CliConfiguration for EmptyVariant { +/// # fn shared_params(&self) -> &sc_cli::SharedParams { unimplemented!() } +/// # fn chain_id(&self, _: bool) -> sc_cli::Result { Ok("test-chain-id".to_string()) } +/// # } +/// +/// # fn main() { +/// enum Subcommand { +/// Variant1(EmptyVariant), +/// Variant2(EmptyVariant), +/// } +/// +/// substrate_cli_subcommands!( +/// Subcommand => Variant1, Variant2 +/// ); +/// +/// # use sc_cli::CliConfiguration; +/// # assert_eq!(Subcommand::Variant1(EmptyVariant {}).chain_id(false).unwrap(), "test-chain-id"); +/// +/// # } +/// ``` +/// +/// Which will expand to: +/// +/// ```ignore +/// impl CliConfiguration for Subcommand { +/// fn base_path(&self) -> Result> { +/// match self { +/// Subcommand::Variant1(cmd) => cmd.base_path(), +/// Subcommand::Variant2(cmd) => cmd.base_path(), +/// } +/// } +/// +/// fn is_dev(&self) -> Result { +/// match self { +/// Subcommand::Variant1(cmd) => cmd.is_dev(), +/// Subcommand::Variant2(cmd) => cmd.is_dev(), +/// } +/// } +/// +/// // ... +/// } +/// ``` +#[macro_export] +macro_rules! substrate_cli_subcommands { + ($enum:ident => $($variant:ident),*) => { + impl $crate::CliConfiguration for $enum { + fn shared_params(&self) -> &$crate::SharedParams { + match self { + $($enum::$variant(cmd) => cmd.shared_params()),* + } + } - /// Run any `CoreParams` command - pub fn run( - self, - config: Configuration, - builder: B, - ) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - match self { - Subcommand::BuildSpec(cmd) => cmd.run(config), - Subcommand::ExportBlocks(cmd) => cmd.run(config, builder), - Subcommand::ImportBlocks(cmd) => cmd.run(config, builder), - Subcommand::CheckBlock(cmd) => cmd.run(config, builder), - Subcommand::PurgeChain(cmd) => cmd.run(config), - Subcommand::Revert(cmd) => cmd.run(config, builder), - } - } + fn import_params(&self) -> Option<&$crate::ImportParams> { + match self { + $($enum::$variant(cmd) => cmd.import_params()),* + } + } - /// Update and prepare a `Configuration` with command line parameters - pub fn update_config( - &self, - mut config: &mut Configuration, - spec_factory: F, - version: &VersionInfo, - ) -> error::Result<()> where - F: FnOnce(&str) -> Result, String>, - { - match self { - Subcommand::BuildSpec(cmd) => cmd.update_config(&mut config, spec_factory, version), - Subcommand::ExportBlocks(cmd) => cmd.update_config(&mut config, spec_factory, version), - Subcommand::ImportBlocks(cmd) => cmd.update_config(&mut config, spec_factory, version), - Subcommand::CheckBlock(cmd) => cmd.update_config(&mut config, spec_factory, version), - Subcommand::PurgeChain(cmd) => cmd.update_config(&mut config, spec_factory, version), - Subcommand::Revert(cmd) => cmd.update_config(&mut config, spec_factory, version), - } - } + fn pruning_params(&self) -> Option<&$crate::PruningParams> { + match self { + $($enum::$variant(cmd) => cmd.pruning_params()),* + } + } + + fn keystore_params(&self) -> Option<&$crate::KeystoreParams> { + match self { + $($enum::$variant(cmd) => cmd.keystore_params()),* + } + } + + fn network_params(&self) -> Option<&$crate::NetworkParams> { + match self { + 
$($enum::$variant(cmd) => cmd.network_params()),* + } + } + + fn base_path(&self) -> $crate::Result<::std::option::Option<::std::path::PathBuf>> { + match self { + $($enum::$variant(cmd) => cmd.base_path()),* + } + } + + fn is_dev(&self) -> $crate::Result { + match self { + $($enum::$variant(cmd) => cmd.is_dev()),* + } + } + + fn role(&self, is_dev: bool) -> $crate::Result<::sc_service::Role> { + match self { + $($enum::$variant(cmd) => cmd.role(is_dev)),* + } + } + + fn transaction_pool(&self) + -> $crate::Result<::sc_service::config::TransactionPoolOptions> { + match self { + $($enum::$variant(cmd) => cmd.transaction_pool()),* + } + } + + fn network_config( + &self, + chain_spec: &::std::boxed::Box, + is_dev: bool, + net_config_dir: &::std::path::PathBuf, + client_id: &str, + node_name: &str, + node_key: ::sc_service::config::NodeKeyConfig, + ) -> $crate::Result<::sc_service::config::NetworkConfiguration> { + match self { + $( + $enum::$variant(cmd) => cmd.network_config( + chain_spec, is_dev, net_config_dir, client_id, node_name, node_key + ) + ),* + } + } + + fn keystore_config(&self, base_path: &::std::path::PathBuf) + -> $crate::Result<::sc_service::config::KeystoreConfig> { + match self { + $($enum::$variant(cmd) => cmd.keystore_config(base_path)),* + } + } + + fn database_cache_size(&self) -> $crate::Result<::std::option::Option> { + match self { + $($enum::$variant(cmd) => cmd.database_cache_size()),* + } + } + + fn database_config( + &self, + base_path: &::std::path::PathBuf, + cache_size: usize, + ) -> $crate::Result<::sc_service::config::DatabaseConfig> { + match self { + $($enum::$variant(cmd) => cmd.database_config(base_path, cache_size)),* + } + } + + fn state_cache_size(&self) -> $crate::Result { + match self { + $($enum::$variant(cmd) => cmd.state_cache_size()),* + } + } + + fn state_cache_child_ratio(&self) -> $crate::Result<::std::option::Option> { + match self { + $($enum::$variant(cmd) => cmd.state_cache_child_ratio()),* + } + } + + fn pruning(&self, is_dev: bool, role: &::sc_service::Role) + -> $crate::Result<::sc_service::config::PruningMode> { + match self { + $($enum::$variant(cmd) => cmd.pruning(is_dev, role)),* + } + } + + fn chain_id(&self, is_dev: bool) -> $crate::Result { + match self { + $($enum::$variant(cmd) => cmd.chain_id(is_dev)),* + } + } + + fn init(&self) -> $crate::Result<()> { + match self { + $($enum::$variant(cmd) => cmd.init::()),* + } + } + + fn node_name(&self) -> $crate::Result { + match self { + $($enum::$variant(cmd) => cmd.node_name()),* + } + } + + fn wasm_method(&self) -> $crate::Result<::sc_service::config::WasmExecutionMethod> { + match self { + $($enum::$variant(cmd) => cmd.wasm_method()),* + } + } - /// Initialize substrate. This must be done only once. - /// - /// This method: - /// - /// 1. Set the panic handler - /// 2. Raise the FD limit - /// 3. 
Initialize the logger - pub fn init(&self, version: &VersionInfo) -> error::Result<()> { - self.get_shared_params().init(version) + fn execution_strategies(&self, is_dev: bool) + -> $crate::Result<::sc_service::config::ExecutionStrategies> { + match self { + $($enum::$variant(cmd) => cmd.execution_strategies(is_dev)),* + } + } + + fn rpc_http(&self) -> $crate::Result<::std::option::Option<::std::net::SocketAddr>> { + match self { + $($enum::$variant(cmd) => cmd.rpc_http()),* + } + } + + fn rpc_ws(&self) -> $crate::Result<::std::option::Option<::std::net::SocketAddr>> { + match self { + $($enum::$variant(cmd) => cmd.rpc_ws()),* + } + } + + fn rpc_ws_max_connections(&self) -> $crate::Result<::std::option::Option> { + match self { + $($enum::$variant(cmd) => cmd.rpc_ws_max_connections()),* + } + } + + fn rpc_cors(&self, is_dev: bool) + -> $crate::Result<::std::option::Option<::std::vec::Vec>> { + match self { + $($enum::$variant(cmd) => cmd.rpc_cors(is_dev)),* + } + } + + fn prometheus_config(&self) + -> $crate::Result<::std::option::Option<::sc_service::config::PrometheusConfig>> { + match self { + $($enum::$variant(cmd) => cmd.prometheus_config()),* + } + } + + fn telemetry_endpoints( + &self, + chain_spec: &Box, + ) -> $crate::Result<::std::option::Option<::sc_service::config::TelemetryEndpoints>> { + match self { + $($enum::$variant(cmd) => cmd.telemetry_endpoints(chain_spec)),* + } + } + + fn telemetry_external_transport(&self) + -> $crate::Result<::std::option::Option<::sc_service::config::ExtTransport>> { + match self { + $($enum::$variant(cmd) => cmd.telemetry_external_transport()),* + } + } + + fn default_heap_pages(&self) -> $crate::Result<::std::option::Option> { + match self { + $($enum::$variant(cmd) => cmd.default_heap_pages()),* + } + } + + fn offchain_worker(&self, role: &::sc_service::Role) -> $crate::Result { + match self { + $($enum::$variant(cmd) => cmd.offchain_worker(role)),* + } + } + + fn force_authoring(&self) -> $crate::Result { + match self { + $($enum::$variant(cmd) => cmd.force_authoring()),* + } + } + + fn disable_grandpa(&self) -> $crate::Result { + match self { + $($enum::$variant(cmd) => cmd.disable_grandpa()),* + } + } + + fn dev_key_seed(&self, is_dev: bool) -> $crate::Result<::std::option::Option> { + match self { + $($enum::$variant(cmd) => cmd.dev_key_seed(is_dev)),* + } + } + + fn tracing_targets(&self) -> $crate::Result<::std::option::Option> { + match self { + $($enum::$variant(cmd) => cmd.tracing_targets()),* + } + } + + fn tracing_receiver(&self) -> $crate::Result<::sc_service::TracingReceiver> { + match self { + $($enum::$variant(cmd) => cmd.tracing_receiver()),* + } + } + + fn node_key(&self, net_config_dir: &::std::path::PathBuf) + -> $crate::Result<::sc_service::config::NodeKeyConfig> { + match self { + $($enum::$variant(cmd) => cmd.node_key(net_config_dir)),* + } + } + + fn max_runtime_instances(&self) -> $crate::Result<::std::option::Option> { + match self { + $($enum::$variant(cmd) => cmd.max_runtime_instances()),* + } + } + + fn log_filters(&self) -> $crate::Result<::std::option::Option> { + match self { + $($enum::$variant(cmd) => cmd.log_filters()),* + } + } + } } } + +substrate_cli_subcommands!( + Subcommand => BuildSpec, ExportBlocks, ImportBlocks, CheckBlock, Revert, PurgeChain +); diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index e12a50bf24f174ccfcc868fa4099bdf11fb45292..845423695eae8ee9b2229e28a8e499579a288f0b 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ 
b/client/cli/src/commands/purge_chain_cmd.rs @@ -14,15 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use crate::error; +use crate::params::SharedParams; +use crate::CliConfiguration; +use sc_service::{config::DatabaseConfig, Configuration}; use std::fmt::Debug; -use std::io::{Write, self}; use std::fs; +use std::io::{self, Write}; use structopt::StructOpt; -use sc_service::{ Configuration, ChainSpec, config::{DatabaseConfig} }; - -use crate::error; -use crate::VersionInfo; -use crate::params::SharedParams; /// The `purge-chain` command used to remove the whole chain. #[derive(Debug, StructOpt, Clone)] @@ -38,11 +37,8 @@ pub struct PurgeChainCmd { impl PurgeChainCmd { /// Run the purge command - pub fn run( - self, - config: Configuration, - ) -> error::Result<()> { - let db_path = match config.expect_database() { + pub fn run(&self, config: Configuration) -> error::Result<()> { + let db_path = match &config.database { DatabaseConfig::Path { path, .. } => path, _ => { eprintln!("Cannot purge custom database implementation"); @@ -76,22 +72,13 @@ impl PurgeChainCmd { eprintln!("{:?} did not exist.", &db_path); Ok(()) }, - Err(err) => Result::Err(err.into()) + Err(err) => Result::Err(err.into()), } } +} - /// Update and prepare a `Configuration` with command line parameters - pub fn update_config( - &self, - mut config: &mut Configuration, - spec_factory: F, - version: &VersionInfo, - ) -> error::Result<()> where - F: FnOnce(&str) -> Result, String>, - { - self.shared_params.update_config(&mut config, spec_factory, version)?; - config.use_in_memory_keystore()?; - - Ok(()) +impl CliConfiguration for PurgeChainCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params } } diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index 8eba199dff07ba4b8f518e69842a4053ed0e8382..f7629ff2f6357e71ed417b0bad6d6ad1fc96851b 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -14,16 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use crate::error; +use crate::params::{BlockNumber, PruningParams, SharedParams}; +use crate::CliConfiguration; +use sc_service::{Configuration, ServiceBuilderCommand}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::fmt::Debug; use structopt::StructOpt; -use sc_service::{ - Configuration, ServiceBuilderCommand, ChainSpec, Roles, -}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; - -use crate::error; -use crate::VersionInfo; -use crate::params::{BlockNumber, SharedParams, PruningParams}; /// The `revert` command used revert the chain to a previous state. 
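The purge-chain and revert commands above show the shape that downstream commands are expected to follow after this refactor: embed the parameter groups you need as flattened `structopt` fields and return references to them from the matching `CliConfiguration` accessors, leaving every other method at its trait default. A minimal sketch of that pattern; the `InspectCmd` name and its purpose are invented, only the trait and parameter types come from this PR:

```rust
use sc_cli::{CliConfiguration, PruningParams, SharedParams};
use structopt::StructOpt;

/// Hypothetical subcommand that needs the shared and pruning parameter groups.
#[derive(Debug, StructOpt)]
pub struct InspectCmd {
    #[allow(missing_docs)]
    #[structopt(flatten)]
    pub shared_params: SharedParams,

    #[allow(missing_docs)]
    #[structopt(flatten)]
    pub pruning_params: PruningParams,
}

impl CliConfiguration for InspectCmd {
    // `shared_params` is the only required accessor; the optional group
    // accessors default to `None`, so we only override the one we provide.
    fn shared_params(&self) -> &SharedParams {
        &self.shared_params
    }

    fn pruning_params(&self) -> Option<&PruningParams> {
        Some(&self.pruning_params)
    }
}
```

An enum wrapping several such commands can then get its `CliConfiguration` implementation generated by the `substrate_cli_subcommands!` macro shown earlier, which simply forwards every method to the active variant.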
#[derive(Debug, StructOpt, Clone)] @@ -43,11 +40,7 @@ pub struct RevertCmd { impl RevertCmd { /// Run the revert command - pub fn run( - self, - config: Configuration, - builder: B, - ) -> error::Result<()> + pub fn run(&self, config: Configuration, builder: B) -> error::Result<()> where B: FnOnce(Configuration) -> Result, BC: ServiceBuilderCommand + Unpin, @@ -60,20 +53,14 @@ impl RevertCmd { Ok(()) } +} - /// Update and prepare a `Configuration` with command line parameters - pub fn update_config( - &self, - mut config: &mut Configuration, - spec_factory: F, - version: &VersionInfo, - ) -> error::Result<()> where - F: FnOnce(&str) -> Result, String>, - { - self.shared_params.update_config(&mut config, spec_factory, version)?; - self.pruning_params.update_config(&mut config, Roles::FULL, true)?; - config.use_in_memory_keystore()?; +impl CliConfiguration for RevertCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - Ok(()) + fn pruning_params(&self) -> Option<&PruningParams> { + Some(&self.pruning_params) } } diff --git a/client/cli/src/commands/runcmd.rs b/client/cli/src/commands/runcmd.rs index 354448636342802dfe84699ca3d6fa517aef931c..b3ce6ce6d119248a0b937c501328fa68e2fa8a6a 100644 --- a/client/cli/src/commands/runcmd.rs +++ b/client/cli/src/commands/runcmd.rs @@ -14,33 +14,21 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::path::PathBuf; -use std::net::SocketAddr; -use std::fs; -use log::info; -use structopt::{StructOpt, clap::arg_enum}; -use names::{Generator, Name}; +use crate::error::{Error, Result}; +use crate::params::ImportParams; +use crate::params::KeystoreParams; +use crate::params::NetworkParams; +use crate::params::SharedParams; +use crate::params::TransactionPoolParams; +use crate::CliConfiguration; use regex::Regex; -use chrono::prelude::*; use sc_service::{ - AbstractService, Configuration, ChainSpec, Roles, - config::{KeystoreConfig, PrometheusConfig}, + config::{MultiaddrWithPeerId, PrometheusConfig, TransactionPoolOptions}, + ChainSpec, Role, }; use sc_telemetry::TelemetryEndpoints; - -use crate::VersionInfo; -use crate::error; -use crate::params::ImportParams; -use crate::params::SharedParams; -use crate::params::NetworkConfigurationParams; -use crate::params::TransactionPoolParams; -use crate::runtime::run_service_until_exit; - -/// The maximum number of characters for a node name. -const NODE_NAME_MAX_LENGTH: usize = 32; - -/// default sub directory for the key store -const DEFAULT_KEYSTORE_CONFIG_PATH : &'static str = "keystore"; +use std::net::SocketAddr; +use structopt::{clap::arg_enum, StructOpt}; arg_enum! { /// Whether off-chain workers are enabled. @@ -78,11 +66,12 @@ pub struct RunCmd { /// available to relay to private nodes. #[structopt( long = "sentry", - conflicts_with_all = &[ "validator", "light" ] + conflicts_with_all = &[ "validator", "light" ], + parse(try_from_str) )] - pub sentry: bool, + pub sentry: Vec, - /// Disable GRANDPA voter when running in validator mode, otherwise disables the GRANDPA observer. + /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA observer. #[structopt(long = "no-grandpa")] pub no_grandpa: bool, @@ -92,7 +81,7 @@ pub struct RunCmd { /// Listen to all RPC interfaces. /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use a RPC proxy + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. 
Use an RPC proxy /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. #[structopt(long = "rpc-external")] @@ -106,7 +95,7 @@ pub struct RunCmd { /// Listen to all Websocket interfaces. /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use a RPC proxy + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. #[structopt(long = "ws-external")] @@ -140,7 +129,7 @@ pub struct RunCmd { /// /// A comma-separated list of origins (protocol://domain or special `null` /// value). Value of `all` will disable origin validation. Default is to - /// allow localhost and https://polkadot.js.org origins. When running in + /// allow localhost and https://polkadot.js.org origins. When running in /// --dev mode the default is to allow all origins. #[structopt(long = "rpc-cors", value_name = "ORIGINS", parse(try_from_str = parse_cors))] pub rpc_cors: Option, @@ -169,10 +158,10 @@ pub struct RunCmd { /// The URL of the telemetry server to connect to. /// - /// This flag can be passed multiple times as a mean to specify multiple + /// This flag can be passed multiple times as a means to specify multiple /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting - /// the least verbosity. If no verbosity level is specified the default is - /// 0. + /// the least verbosity. + /// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`. #[structopt(long = "telemetry-url", value_name = "URL VERBOSITY", parse(try_from_str = parse_telemetry_endpoints))] pub telemetry_endpoints: Vec<(String, u8)>, @@ -198,7 +187,7 @@ pub struct RunCmd { #[allow(missing_docs)] #[structopt(flatten)] - pub network_config: NetworkConfigurationParams, + pub network_params: NetworkParams, #[allow(missing_docs)] #[structopt(flatten)] @@ -240,259 +229,227 @@ pub struct RunCmd { #[structopt(long = "force-authoring")] pub force_authoring: bool, - /// Specify custom keystore path. - #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] - pub keystore_path: Option, - - /// Use interactive shell for entering the password used by the keystore. - #[structopt( - long = "password-interactive", - conflicts_with_all = &[ "password", "password-filename" ] - )] - pub password_interactive: bool, - - /// Password used by the keystore. - #[structopt( - long = "password", - conflicts_with_all = &[ "password-interactive", "password-filename" ] - )] - pub password: Option, - - /// File that contains the password used by the keystore. - #[structopt( - long = "password-filename", - value_name = "PATH", - parse(from_os_str), - conflicts_with_all = &[ "password-interactive", "password" ] - )] - pub password_filename: Option, + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, /// The size of the instances cache for each runtime. /// /// The default value is 8 and the values higher than 256 are ignored. - #[structopt(long = "max-runtime-instances", default_value = "8")] - pub max_runtime_instances: usize, + #[structopt(long)] + pub max_runtime_instances: Option, + + /// Specify a list of sentry node public addresses. 
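The `--sentry` flag changes here from a plain boolean into a repeatable list of validator addresses, with `--sentry-nodes` (declared just below) as its counterpart on validators. The collection behaviour comes from `structopt`: a `Vec<T>` field with `parse(try_from_str)` accepts the flag any number of times. A standalone sketch of that mechanism, with `String` standing in for `MultiaddrWithPeerId`:

```rust
use structopt::StructOpt;

#[derive(Debug, StructOpt)]
struct Opts {
    /// Address of a validator this sentry fronts; may be passed repeatedly.
    #[structopt(long = "sentry", parse(try_from_str))]
    sentry: Vec<String>,
}

fn main() {
    // Each occurrence of `--sentry` appends one entry to the vector.
    let opts = Opts::from_iter(vec![
        "node",
        "--sentry", "/dns4/a.example/tcp/30333/p2p/Qm1",
        "--sentry", "/dns4/b.example/tcp/30333/p2p/Qm2",
    ]);
    assert_eq!(opts.sentry.len(), 2);
}
```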
+ #[structopt( + long = "sentry-nodes", + value_name = "ADDR", + conflicts_with_all = &[ "sentry" ] + )] + pub sentry_nodes: Vec, } impl RunCmd { - /// Get the `Sr25519Keyring` matching one of the flag + /// Get the `Sr25519Keyring` matching one of the flag. pub fn get_keyring(&self) -> Option { use sp_keyring::Sr25519Keyring::*; - if self.alice { Some(Alice) } - else if self.bob { Some(Bob) } - else if self.charlie { Some(Charlie) } - else if self.dave { Some(Dave) } - else if self.eve { Some(Eve) } - else if self.ferdie { Some(Ferdie) } - else if self.one { Some(One) } - else if self.two { Some(Two) } - else { None } - } - - /// Update and prepare a `Configuration` with command line parameters of `RunCmd` and `VersionInfo` - pub fn update_config( - &self, - mut config: &mut Configuration, - spec_factory: F, - version: &VersionInfo, - ) -> error::Result<()> - where - F: FnOnce(&str) -> Result, String>, - { - self.shared_params.update_config(&mut config, spec_factory, version)?; - - let password = if self.password_interactive { - #[cfg(not(target_os = "unknown"))] - { - Some(input_keystore_password()?.into()) - } - #[cfg(target_os = "unknown")] - None - } else if let Some(ref file) = self.password_filename { - Some(fs::read_to_string(file).map_err(|e| format!("{}", e))?.into()) - } else if let Some(ref password) = self.password { - Some(password.clone().into()) + if self.alice { + Some(Alice) + } else if self.bob { + Some(Bob) + } else if self.charlie { + Some(Charlie) + } else if self.dave { + Some(Dave) + } else if self.eve { + Some(Eve) + } else if self.ferdie { + Some(Ferdie) + } else if self.one { + Some(One) + } else if self.two { + Some(Two) } else { None - }; + } + } +} - let path = self.keystore_path.clone().or( - config.in_chain_config_dir(DEFAULT_KEYSTORE_CONFIG_PATH) - ); +impl CliConfiguration for RunCmd { + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - config.keystore = KeystoreConfig::Path { - path: path.ok_or_else(|| "No `base_path` provided to create keystore path!".to_string())?, - password, - }; + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) + } - let keyring = self.get_keyring(); - let is_dev = self.shared_params.dev; - let is_light = self.light; - let is_authority = (self.validator || self.sentry || is_dev || keyring.is_some()) - && !is_light; - let role = - if is_light { - sc_service::Roles::LIGHT - } else if is_authority { - sc_service::Roles::AUTHORITY - } else { - sc_service::Roles::FULL - }; + fn network_params(&self) -> Option<&NetworkParams> { + Some(&self.network_params) + } - self.import_params.update_config(&mut config, role, is_dev)?; + fn keystore_params(&self) -> Option<&KeystoreParams> { + Some(&self.keystore_params) + } - config.name = match (self.name.as_ref(), keyring) { + fn node_name(&self) -> Result { + let name: String = match (self.name.as_ref(), self.get_keyring()) { (Some(name), _) => name.to_string(), (_, Some(keyring)) => keyring.to_string(), - (None, None) => generate_node_name(), + (None, None) => crate::generate_node_name(), }; - if let Err(msg) = is_node_name_valid(&config.name) { - return Err(error::Error::Input( - format!("Invalid node name '{}'. Reason: {}. If unsure, use none.", - config.name, - msg, - ) + + is_node_name_valid(&name).map_err(|msg| { + Error::Input(format!( + "Invalid node name '{}'. Reason: {}. If unsure, use none.", + name, msg )); - } + })?; - // set sentry mode (i.e. 
act as an authority but **never** actively participate) - config.sentry_mode = self.sentry; + Ok(name) + } - config.offchain_worker = match (&self.offchain_worker, role) { - (OffchainWorkerEnabled::WhenValidating, sc_service::Roles::AUTHORITY) => true, - (OffchainWorkerEnabled::Always, _) => true, - (OffchainWorkerEnabled::Never, _) => false, - (OffchainWorkerEnabled::WhenValidating, _) => false, - }; + fn dev_key_seed(&self, is_dev: bool) -> Result> { + Ok(self.get_keyring().map(|a| format!("//{}", a)).or_else(|| { + if is_dev && !self.light { + Some("//Alice".into()) + } else { + None + } + })) + } - config.roles = role; - config.disable_grandpa = self.no_grandpa; - - let client_id = config.client_id(); - let network_path = config - .in_chain_config_dir(crate::commands::DEFAULT_NETWORK_CONFIG_PATH) - .expect("We provided a basepath"); - self.network_config.update_config( - &mut config, - network_path, - client_id, - is_dev, - )?; - - self.pool_config.update_config(&mut config)?; - - config.dev_key_seed = keyring - .map(|a| format!("//{}", a)).or_else(|| { - if is_dev && !is_light { - Some("//Alice".into()) - } else { - None - } - }); + fn telemetry_endpoints( + &self, + chain_spec: &Box, + ) -> Result> { + Ok(if self.no_telemetry { + None + } else if !self.telemetry_endpoints.is_empty() { + Some( + TelemetryEndpoints::new(self.telemetry_endpoints.clone()) + .map_err(|e| e.to_string())?, + ) + } else { + chain_spec.telemetry_endpoints().clone() + }) + } - if config.rpc_http.is_none() || self.rpc_port.is_some() { - let rpc_interface: &str = interface_str(self.rpc_external, self.unsafe_rpc_external, self.validator)?; - config.rpc_http = Some(parse_address(&format!("{}:{}", rpc_interface, 9933), self.rpc_port)?); - } - if config.rpc_ws.is_none() || self.ws_port.is_some() { - let ws_interface: &str = interface_str(self.ws_external, self.unsafe_ws_external, self.validator)?; - config.rpc_ws = Some(parse_address(&format!("{}:{}", ws_interface, 9944), self.ws_port)?); - } + fn role(&self, is_dev: bool) -> Result { + let keyring = self.get_keyring(); + let is_light = self.light; + let is_authority = (self.validator || is_dev || keyring.is_some()) && !is_light; - config.rpc_ws_max_connections = self.ws_max_connections; - config.rpc_cors = self.rpc_cors.clone().unwrap_or_else(|| if is_dev { - log::warn!("Running in --dev mode, RPC CORS has been disabled."); - Cors::All + Ok(if is_light { + sc_service::Role::Light + } else if is_authority { + sc_service::Role::Authority { + sentry_nodes: self.sentry_nodes.clone(), + } + } else if !self.sentry.is_empty() { + sc_service::Role::Sentry { + validators: self.sentry.clone(), + } } else { - Cors::List(vec![ - "http://localhost:*".into(), - "http://127.0.0.1:*".into(), - "https://localhost:*".into(), - "https://127.0.0.1:*".into(), - "https://polkadot.js.org".into(), - ]) - }).into(); - - // Override telemetry - if self.no_telemetry { - config.telemetry_endpoints = None; - } else if !self.telemetry_endpoints.is_empty() { - config.telemetry_endpoints = Some( - TelemetryEndpoints::new(self.telemetry_endpoints.clone()) - ); - } + sc_service::Role::Full + }) + } - // Override prometheus + fn force_authoring(&self) -> Result { + // Imply forced authoring on --dev + Ok(self.shared_params.dev || self.force_authoring) + } + + fn prometheus_config(&self) -> Result> { if self.no_prometheus { - config.prometheus_config = None; - } else if config.prometheus_config.is_none() { - let prometheus_interface: &str = if self.prometheus_external { "0.0.0.0" } else { "127.0.0.1" 
}; - config.prometheus_config = Some(PrometheusConfig::new_with_default_registry( - parse_address(&format!("{}:{}", prometheus_interface, 9615), self.prometheus_port)?, - )); + Ok(None) + } else { + let prometheus_interface: &str = if self.prometheus_external { + "0.0.0.0" + } else { + "127.0.0.1" + }; + + Ok(Some(PrometheusConfig::new_with_default_registry( + parse_address( + &format!("{}:{}", prometheus_interface, 9615), + self.prometheus_port, + )?, + ))) } + } - config.tracing_targets = self.import_params.tracing_targets.clone().into(); - config.tracing_receiver = self.import_params.tracing_receiver.clone().into(); + fn disable_grandpa(&self) -> Result { + Ok(self.no_grandpa) + } - // Imply forced authoring on --dev - config.force_authoring = self.shared_params.dev || self.force_authoring; - - config.max_runtime_instances = self.max_runtime_instances.min(256); - - Ok(()) - } - - /// Run the command that runs the node - pub fn run( - self, - config: Configuration, - new_light: FNL, - new_full: FNF, - version: &VersionInfo, - ) -> error::Result<()> - where - FNL: FnOnce(Configuration) -> Result, - FNF: FnOnce(Configuration) -> Result, - SL: AbstractService + Unpin, - SF: AbstractService + Unpin, - { - info!("{}", version.name); - info!(" version {}", config.full_version()); - info!(" by {}, {}-{}", version.author, version.copyright_start_year, Local::today().year()); - info!("Chain specification: {}", config.expect_chain_spec().name()); - info!("Node name: {}", config.name); - info!("Roles: {}", config.display_role()); - - match config.roles { - Roles::LIGHT => run_service_until_exit( - config, - new_light, - ), - _ => run_service_until_exit( - config, - new_full, - ), - } + fn rpc_ws_max_connections(&self) -> Result> { + Ok(self.ws_max_connections) } - /// Initialize substrate. This must be done only once. - /// - /// This method: - /// - /// 1. Set the panic handler - /// 2. Raise the FD limit - /// 3. Initialize the logger - pub fn init(&self, version: &VersionInfo) -> error::Result<()> { - self.shared_params.init(version) + fn rpc_cors(&self, is_dev: bool) -> Result>> { + Ok(self + .rpc_cors + .clone() + .unwrap_or_else(|| { + if is_dev { + log::warn!("Running in --dev mode, RPC CORS has been disabled."); + Cors::All + } else { + Cors::List(vec![ + "http://localhost:*".into(), + "http://127.0.0.1:*".into(), + "https://localhost:*".into(), + "https://127.0.0.1:*".into(), + "https://polkadot.js.org".into(), + ]) + } + }) + .into()) + } + + fn rpc_http(&self) -> Result> { + let rpc_interface: &str = + interface_str(self.rpc_external, self.unsafe_rpc_external, self.validator)?; + + Ok(Some(parse_address( + &format!("{}:{}", rpc_interface, 9933), + self.rpc_port, + )?)) + } + + fn rpc_ws(&self) -> Result> { + let ws_interface: &str = + interface_str(self.ws_external, self.unsafe_ws_external, self.validator)?; + + Ok(Some(parse_address( + &format!("{}:{}", ws_interface, 9944), + self.ws_port, + )?)) + } + + fn offchain_worker(&self, role: &Role) -> Result { + Ok(match (&self.offchain_worker, role) { + (OffchainWorkerEnabled::WhenValidating, Role::Authority { .. 
}) => true, + (OffchainWorkerEnabled::Always, _) => true, + (OffchainWorkerEnabled::Never, _) => false, + (OffchainWorkerEnabled::WhenValidating, _) => false, + }) + } + + fn transaction_pool(&self) -> Result { + Ok(self.pool_config.transaction_pool()) + } + + fn max_runtime_instances(&self) -> Result> { + Ok(self.max_runtime_instances.map(|x| x.min(256))) } } -/// Check whether a node name is considered as valid -pub fn is_node_name_valid(_name: &str) -> Result<(), &str> { +/// Check whether a node name is considered as valid. +pub fn is_node_name_valid(_name: &str) -> std::result::Result<(), &str> { let name = _name.to_string(); - if name.chars().count() >= NODE_NAME_MAX_LENGTH { + if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { return Err("Node name too long"); } @@ -511,32 +468,10 @@ pub fn is_node_name_valid(_name: &str) -> Result<(), &str> { Ok(()) } -#[cfg(not(target_os = "unknown"))] -fn input_keystore_password() -> Result { - rpassword::read_password_from_tty(Some("Keystore password: ")) - .map_err(|e| format!("{:?}", e)) -} - -fn generate_node_name() -> String { - let result = loop { - let node_name = Generator::with_naming(Name::Numbered).next().unwrap(); - let count = node_name.chars().count(); - - if count < NODE_NAME_MAX_LENGTH { - break node_name - } - }; - - result -} - -fn parse_address( - address: &str, - port: Option, -) -> Result { - let mut address: SocketAddr = address.parse().map_err( - |_| format!("Invalid address: {}", address) - )?; +fn parse_address(address: &str, port: Option) -> std::result::Result { + let mut address: SocketAddr = address + .parse() + .map_err(|_| format!("Invalid address: {}", address))?; if let Some(port) = port { address.set_port(port); } @@ -548,16 +483,21 @@ fn interface_str( is_external: bool, is_unsafe_external: bool, is_validator: bool, -) -> Result<&'static str, error::Error> { +) -> Result<&'static str> { if is_external && is_validator { - return Err(error::Error::Input("--rpc-external and --ws-external options shouldn't be \ + return Err(Error::Input( + "--rpc-external and --ws-external options shouldn't be \ used if the node is running as a validator. Use `--unsafe-rpc-external` if you understand \ - the risks. See the options description for more information.".to_owned())); + the risks. See the options description for more information." + .to_owned(), + )); } if is_external || is_unsafe_external { - log::warn!("It isn't safe to expose RPC publicly without a proxy server that filters \ - available set of RPC methods."); + log::warn!( + "It isn't safe to expose RPC publicly without a proxy server that filters \ + available set of RPC methods." + ); Ok("0.0.0.0") } else { @@ -565,16 +505,32 @@ fn interface_str( } } -/// Default to verbosity level 0, if none is provided. 
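The reformatted `parse_address` helper above keeps its previous behaviour: parse an `interface:port` default and let an explicit `--rpc-port`, `--ws-port`, or `--prometheus-port` value override only the port. A small self-contained usage sketch of the same logic:

```rust
use std::net::SocketAddr;

// Same logic as the helper above: parse the "interface:port" default string,
// then substitute the port if one was supplied on the command line.
fn parse_address(address: &str, port: Option<u16>) -> Result<SocketAddr, String> {
    let mut address: SocketAddr = address
        .parse()
        .map_err(|_| format!("Invalid address: {}", address))?;
    if let Some(port) = port {
        address.set_port(port);
    }
    Ok(address)
}

fn main() {
    // Default RPC HTTP endpoint, overridden by e.g. `--rpc-port 8080`.
    let addr = parse_address("127.0.0.1:9933", Some(8080)).unwrap();
    assert_eq!(addr, "127.0.0.1:8080".parse().unwrap());
}
```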
-fn parse_telemetry_endpoints(s: &str) -> Result<(String, u8), Box> { +#[derive(Debug)] +enum TelemetryParsingError { + MissingVerbosity, + VerbosityParsingError(std::num::ParseIntError), +} + +impl std::error::Error for TelemetryParsingError {} + +impl std::fmt::Display for TelemetryParsingError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match &*self { + TelemetryParsingError::MissingVerbosity => write!(f, "Verbosity level missing"), + TelemetryParsingError::VerbosityParsingError(e) => write!(f, "{}", e), + } + } +} + +fn parse_telemetry_endpoints(s: &str) -> std::result::Result<(String, u8), TelemetryParsingError> { let pos = s.find(' '); match pos { - None => { - Ok((s.to_owned(), 0)) - }, + None => Err(TelemetryParsingError::MissingVerbosity), Some(pos_) => { - let verbosity = s[pos_ + 1..].parse()?; - let url = s[..pos_].parse()?; + let url = s[..pos_].to_string(); + let verbosity = s[pos_ + 1..] + .parse() + .map_err(TelemetryParsingError::VerbosityParsingError)?; Ok((url, verbosity)) } } @@ -586,7 +542,7 @@ fn parse_telemetry_endpoints(s: &str) -> Result<(String, u8), Box), @@ -601,8 +557,8 @@ impl From for Option> { } } -/// Parse cors origins -fn parse_cors(s: &str) -> Result> { +/// Parse cors origins. +fn parse_cors(s: &str) -> std::result::Result> { let mut is_all = false; let mut origins = Vec::new(); for part in s.split(',') { @@ -610,29 +566,21 @@ fn parse_cors(s: &str) -> Result> { "all" | "*" => { is_all = true; break; - }, + } other => origins.push(other.to_owned()), } } - Ok(if is_all { Cors::All } else { Cors::List(origins) }) + Ok(if is_all { + Cors::All + } else { + Cors::List(origins) + }) } #[cfg(test)] mod tests { use super::*; - use sc_service::{GenericChainSpec, config::DatabaseConfig}; - - const TEST_VERSION_INFO: &'static VersionInfo = &VersionInfo { - name: "node-test", - version: "0.1.0", - commit: "some_commit", - executable_name: "node-test", - description: "description", - author: "author", - support_url: "http://example.org", - copyright_start_year: 2020, - }; #[test] fn tests_node_name_good() { @@ -648,93 +596,4 @@ mod tests { assert!(is_node_name_valid("www.visit.me").is_err()); assert!(is_node_name_valid("email@domain").is_err()); } - - #[test] - fn keystore_path_is_generated_correctly() { - let chain_spec = GenericChainSpec::from_genesis( - "test", - "test-id", - || (), - Vec::new(), - None, - None, - None, - None::<()>, - ); - - for keystore_path in vec![None, Some("/keystore/path")] { - let args: Vec<&str> = vec![]; - let mut cli = RunCmd::from_iter(args); - cli.keystore_path = keystore_path.clone().map(PathBuf::from); - - let mut config = Configuration::default(); - config.config_dir = Some(PathBuf::from("/test/path")); - config.chain_spec = Some(Box::new(chain_spec.clone())); - let chain_spec = chain_spec.clone(); - cli.update_config(&mut config, move |_| Ok(Box::new(chain_spec)), TEST_VERSION_INFO).unwrap(); - - let expected_path = match keystore_path { - Some(path) => PathBuf::from(path), - None => PathBuf::from("/test/path/chains/test-id/keystore"), - }; - - assert_eq!(expected_path, config.keystore.path().unwrap().to_owned()); - } - } - - #[test] - fn ensure_load_spec_provide_defaults() { - let chain_spec = GenericChainSpec::from_genesis( - "test", - "test-id", - || (), - vec!["boo".to_string()], - Some(TelemetryEndpoints::new(vec![("foo".to_string(), 42)])), - None, - None, - None::<()>, - ); - - let args: Vec<&str> = vec![]; - let cli = RunCmd::from_iter(args); - - let mut config = 
Configuration::from_version(TEST_VERSION_INFO); - cli.update_config(&mut config, |_| Ok(Box::new(chain_spec)), TEST_VERSION_INFO).unwrap(); - - assert!(config.chain_spec.is_some()); - assert!(!config.network.boot_nodes.is_empty()); - assert!(config.telemetry_endpoints.is_some()); - } - - #[test] - fn ensure_update_config_for_running_node_provides_defaults() { - let chain_spec = GenericChainSpec::from_genesis( - "test", - "test-id", - || (), - vec![], - None, - None, - None, - None::<()>, - ); - - let args: Vec<&str> = vec![]; - let cli = RunCmd::from_iter(args); - - let mut config = Configuration::from_version(TEST_VERSION_INFO); - cli.init(&TEST_VERSION_INFO).unwrap(); - cli.update_config(&mut config, |_| Ok(Box::new(chain_spec)), TEST_VERSION_INFO).unwrap(); - - assert!(config.config_dir.is_some()); - assert!(config.database.is_some()); - if let Some(DatabaseConfig::Path { ref cache_size, .. }) = config.database { - assert!(cache_size.is_some()); - } else { - panic!("invalid config.database variant"); - } - assert!(!config.name.is_empty()); - assert!(config.network.config_path.is_some()); - assert!(!config.network.listen_addresses.is_empty()); - } } diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs new file mode 100644 index 0000000000000000000000000000000000000000..6c4cc0e7103f9147711a609dd14dd9f64f9d98c5 --- /dev/null +++ b/client/cli/src/config.rs @@ -0,0 +1,462 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Configuration trait for a CLI based on substrate + +use crate::error::Result; +use crate::{ + init_logger, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, + PruningParams, SharedParams, SubstrateCli, +}; +use app_dirs::{AppDataType, AppInfo}; +use names::{Generator, Name}; +use sc_service::config::{ + Configuration, DatabaseConfig, ExecutionStrategies, ExtTransport, KeystoreConfig, + NetworkConfiguration, NodeKeyConfig, PrometheusConfig, PruningMode, Role, TelemetryEndpoints, + TransactionPoolOptions, WasmExecutionMethod, +}; +use sc_service::{ChainSpec, TracingReceiver}; +use std::future::Future; +use std::net::SocketAddr; +use std::path::PathBuf; +use std::pin::Pin; +use std::sync::Arc; + +/// The maximum number of characters for a node name. 
+pub(crate) const NODE_NAME_MAX_LENGTH: usize = 32; + +/// default sub directory to store network config +pub(crate) const DEFAULT_NETWORK_CONFIG_PATH: &'static str = "network"; + +/// A trait that allows converting an object to a Configuration +pub trait CliConfiguration: Sized { + /// Get the SharedParams for this object + fn shared_params(&self) -> &SharedParams; + + /// Get the ImportParams for this object + fn import_params(&self) -> Option<&ImportParams> { + None + } + + /// Get the PruningParams for this object + fn pruning_params(&self) -> Option<&PruningParams> { + self.import_params().map(|x| &x.pruning_params) + } + + /// Get the KeystoreParams for this object + fn keystore_params(&self) -> Option<&KeystoreParams> { + None + } + + /// Get the NetworkParams for this object + fn network_params(&self) -> Option<&NetworkParams> { + None + } + + /// Get the NodeKeyParams for this object + fn node_key_params(&self) -> Option<&NodeKeyParams> { + self.network_params() + .map(|x| &x.node_key_params) + } + + /// Get the base path of the configuration (if any) + /// + /// By default this is retrieved from `SharedParams`. + fn base_path(&self) -> Result> { + Ok(self.shared_params().base_path()) + } + + /// Returns `true` if the node is for development or not + /// + /// By default this is retrieved from `SharedParams`. + fn is_dev(&self) -> Result { + Ok(self.shared_params().is_dev()) + } + + /// Gets the role + /// + /// By default this is `Role::Full`. + fn role(&self, _is_dev: bool) -> Result { + Ok(Role::Full) + } + + /// Get the transaction pool options + /// + /// By default this is `TransactionPoolOptions::default()`. + fn transaction_pool(&self) -> Result { + Ok(Default::default()) + } + + /// Get the network configuration + /// + /// By default this is retrieved from `NetworkParams` if it is available otherwise it creates + /// a default `NetworkConfiguration` based on `node_name`, `client_id`, `node_key` and + /// `net_config_dir`. + fn network_config( + &self, + chain_spec: &Box, + is_dev: bool, + net_config_dir: &PathBuf, + client_id: &str, + node_name: &str, + node_key: NodeKeyConfig, + ) -> Result { + Ok(if let Some(network_params) = self.network_params() { + network_params.network_config( + chain_spec, + is_dev, + net_config_dir, + client_id, + node_name, + node_key, + ) + } else { + NetworkConfiguration::new( + node_name, + client_id, + node_key, + net_config_dir, + ) + }) + } + + /// Get the keystore configuration. + /// + /// Bu default this is retrieved from `KeystoreParams` if it is available. Otherwise it uses + /// `KeystoreConfig::InMemory`. + fn keystore_config(&self, base_path: &PathBuf) -> Result { + self.keystore_params() + .map(|x| x.keystore_config(base_path)) + .unwrap_or(Ok(KeystoreConfig::InMemory)) + } + + /// Get the database cache size. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `None`. + fn database_cache_size(&self) -> Result> { + Ok(self.import_params() + .map(|x| x.database_cache_size()) + .unwrap_or(Default::default())) + } + + /// Get the database configuration. + /// + /// By default this is retrieved from `SharedParams` + fn database_config(&self, base_path: &PathBuf, cache_size: usize) -> Result { + Ok(self.shared_params().database_config(base_path, cache_size)) + } + + /// Get the state cache size. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `0`. 
+ fn state_cache_size(&self) -> Result { + Ok(self.import_params() + .map(|x| x.state_cache_size()) + .unwrap_or(Default::default())) + } + + /// Get the state cache child ratio (if any). + /// + /// By default this is `None`. + fn state_cache_child_ratio(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the pruning mode. + /// + /// By default this is retrieved from `PruningMode` if it is available. Otherwise its + /// `PruningMode::default()`. + fn pruning(&self, is_dev: bool, role: &Role) -> Result { + self.pruning_params() + .map(|x| x.pruning(is_dev, role)) + .unwrap_or(Ok(Default::default())) + } + + /// Get the chain ID (string). + /// + /// By default this is retrieved from `SharedParams`. + fn chain_id(&self, is_dev: bool) -> Result { + Ok(self.shared_params().chain_id(is_dev)) + } + + /// Get the name of the node. + /// + /// By default a random name is generated. + fn node_name(&self) -> Result { + Ok(generate_node_name()) + } + + /// Get the WASM execution method. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// `WasmExecutionMethod::default()`. + fn wasm_method(&self) -> Result { + Ok(self.import_params() + .map(|x| x.wasm_method()) + .unwrap_or(Default::default())) + } + + /// Get the execution strategies. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// `ExecutionStrategies::default()`. + fn execution_strategies(&self, is_dev: bool) -> Result { + Ok(self.import_params() + .map(|x| x.execution_strategies(is_dev)) + .unwrap_or(Default::default())) + } + + /// Get the RPC HTTP address (`None` if disabled). + /// + /// By default this is `None`. + fn rpc_http(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the RPC websocket address (`None` if disabled). + /// + /// By default this is `None`. + fn rpc_ws(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the RPC websockets maximum connections (`None` if unlimited). + /// + /// By default this is `None`. + fn rpc_ws_max_connections(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the RPC cors (`None` if disabled) + /// + /// By default this is `None`. + fn rpc_cors(&self, _is_dev: bool) -> Result>> { + Ok(Some(Vec::new())) + } + + /// Get the prometheus configuration (`None` if disabled) + /// + /// By default this is `None`. + fn prometheus_config(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the telemetry endpoints (if any) + /// + /// By default this is retrieved from the chain spec loaded by `load_spec`. + fn telemetry_endpoints( + &self, + chain_spec: &Box, + ) -> Result> { + Ok(chain_spec.telemetry_endpoints().clone()) + } + + /// Get the telemetry external transport + /// + /// By default this is `None`. + fn telemetry_external_transport(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the default value for heap pages + /// + /// By default this is `None`. + fn default_heap_pages(&self) -> Result> { + Ok(Default::default()) + } + + /// Returns `Ok(true)` if offchain worker should be used + /// + /// By default this is `false`. + fn offchain_worker(&self, _role: &Role) -> Result { + Ok(Default::default()) + } + + /// Returns `Ok(true)` if authoring should be forced + /// + /// By default this is `false`. + fn force_authoring(&self) -> Result { + Ok(Default::default()) + } + + /// Returns `Ok(true)` if grandpa should be disabled + /// + /// By default this is `false`. 
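Because nearly every method of `CliConfiguration` carries a default, an implementor can change exactly one behaviour and inherit the rest. A hypothetical sketch (the command itself is invented; the trait, `SharedParams`, and `sc_cli::Result` are the items defined in this PR):

```rust
use sc_cli::{CliConfiguration, SharedParams};
use structopt::StructOpt;

/// Invented command: everything stays at the trait defaults except the chain
/// id, which is pinned to "dev" regardless of what was passed on the CLI.
#[derive(Debug, StructOpt)]
pub struct LocalOnlyCmd {
    #[allow(missing_docs)]
    #[structopt(flatten)]
    pub shared_params: SharedParams,
}

impl CliConfiguration for LocalOnlyCmd {
    fn shared_params(&self) -> &SharedParams {
        &self.shared_params
    }

    fn chain_id(&self, _is_dev: bool) -> sc_cli::Result<String> {
        Ok("dev".into())
    }
}
```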
+ fn disable_grandpa(&self) -> Result { + Ok(Default::default()) + } + + /// Get the development key seed from the current object + /// + /// By default this is `None`. + fn dev_key_seed(&self, _is_dev: bool) -> Result> { + Ok(Default::default()) + } + + /// Get the tracing targets from the current object (if any) + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// `None`. + fn tracing_targets(&self) -> Result> { + Ok(self.import_params() + .map(|x| x.tracing_targets()) + .unwrap_or(Default::default())) + } + + /// Get the TracingReceiver value from the current object + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// `TracingReceiver::default()`. + fn tracing_receiver(&self) -> Result { + Ok(self.import_params() + .map(|x| x.tracing_receiver()) + .unwrap_or(Default::default())) + } + + /// Get the node key from the current object + /// + /// By default this is retrieved from `NodeKeyParams` if it is available. Otherwise its + /// `NodeKeyConfig::default()`. + fn node_key(&self, net_config_dir: &PathBuf) -> Result { + self.node_key_params() + .map(|x| x.node_key(net_config_dir)) + .unwrap_or(Ok(Default::default())) + } + + /// Get maximum runtime instances + /// + /// By default this is `None`. + fn max_runtime_instances(&self) -> Result> { + Ok(Default::default()) + } + + /// Activate or not the automatic announcing of blocks after import + /// + /// By default this is `false`. + fn announce_block(&self) -> Result { + Ok(true) + } + + /// Create a Configuration object from the current object + fn create_configuration( + &self, + cli: &C, + task_executor: Arc + Send>>) + Send + Sync>, + ) -> Result { + let is_dev = self.is_dev()?; + let chain_id = self.chain_id(is_dev)?; + let chain_spec = cli.load_spec(chain_id.as_str())?; + let config_dir = self + .base_path()? 
+ .unwrap_or_else(|| { + app_dirs::get_app_root( + AppDataType::UserData, + &AppInfo { + name: C::executable_name(), + author: C::author(), + }, + ) + .expect("app directories exist on all supported platforms; qed") + }) + .join("chains") + .join(chain_spec.id()); + let net_config_dir = config_dir.join(DEFAULT_NETWORK_CONFIG_PATH); + let client_id = C::client_id(); + let database_cache_size = self.database_cache_size()?.unwrap_or(128); + let node_key = self.node_key(&net_config_dir)?; + let role = self.role(is_dev)?; + let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8); + + Ok(Configuration { + impl_name: C::impl_name(), + impl_version: C::impl_version(), + task_executor, + transaction_pool: self.transaction_pool()?, + network: self.network_config( + &chain_spec, + is_dev, + &net_config_dir, + client_id.as_str(), + self.node_name()?.as_str(), + node_key, + )?, + keystore: self.keystore_config(&config_dir)?, + database: self.database_config(&config_dir, database_cache_size)?, + state_cache_size: self.state_cache_size()?, + state_cache_child_ratio: self.state_cache_child_ratio()?, + pruning: self.pruning(is_dev, &role)?, + wasm_method: self.wasm_method()?, + execution_strategies: self.execution_strategies(is_dev)?, + rpc_http: self.rpc_http()?, + rpc_ws: self.rpc_ws()?, + rpc_ws_max_connections: self.rpc_ws_max_connections()?, + rpc_cors: self.rpc_cors(is_dev)?, + prometheus_config: self.prometheus_config()?, + telemetry_endpoints: self.telemetry_endpoints(&chain_spec)?, + telemetry_external_transport: self.telemetry_external_transport()?, + default_heap_pages: self.default_heap_pages()?, + offchain_worker: self.offchain_worker(&role)?, + force_authoring: self.force_authoring()?, + disable_grandpa: self.disable_grandpa()?, + dev_key_seed: self.dev_key_seed(is_dev)?, + tracing_targets: self.tracing_targets()?, + tracing_receiver: self.tracing_receiver()?, + chain_spec, + max_runtime_instances, + announce_block: self.announce_block()?, + role, + }) + } + + /// Get the filters for the logging. + /// + /// By default this is retrieved from `SharedParams`. + fn log_filters(&self) -> Result> { + Ok(self.shared_params().log_filters()) + } + + /// Initialize substrate. This must be done only once. + /// + /// This method: + /// + /// 1. Set the panic handler + /// 2. Raise the FD limit + /// 3. 
Initialize the logger + fn init(&self) -> Result<()> { + let logger_pattern = self.log_filters()?.unwrap_or_default(); + + sp_panic_handler::set(C::support_url(), C::impl_version()); + + fdlimit::raise_fd_limit(); + init_logger(logger_pattern.as_str()); + + Ok(()) + } +} + +/// Generate a valid random name for the node +pub fn generate_node_name() -> String { + loop { + let node_name = Generator::with_naming(Name::Numbered) + .next() + .expect("RNG is available on all supported platforms; qed"); + let count = node_name.chars().count(); + + if count < NODE_NAME_MAX_LENGTH { + return node_name; + } + }; +} diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index e28edebd60def53ba34497d416a9bea7919ef983..25b71059b17e62a397bde14bccee91fe95fb62cd 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -19,137 +19,175 @@ #![warn(missing_docs)] #![warn(unused_extern_crates)] -mod params; mod arg_enums; -mod error; -mod runtime; mod commands; +mod config; +mod error; +mod params; +mod runner; -pub use sc_service::config::VersionInfo; - -use std::io::Write; - -use regex::Regex; -use structopt::{StructOpt, clap::{self, AppSettings}}; -pub use structopt; -pub use params::*; -pub use commands::*; pub use arg_enums::*; +pub use commands::*; +pub use config::*; pub use error::*; -use log::info; use lazy_static::lazy_static; -pub use crate::runtime::{run_until_exit, run_service_until_exit}; +use log::info; +pub use params::*; +use regex::Regex; +pub use runner::*; +use sc_service::{ChainSpec, Configuration}; +use std::future::Future; +use std::io::Write; +use std::pin::Pin; +use std::sync::Arc; +pub use structopt; +use structopt::{ + clap::{self, AppSettings}, + StructOpt, +}; -/// Helper function used to parse the command line arguments. This is the equivalent of -/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of -/// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. +/// Substrate client CLI /// -/// To allow running the node without subcommand, tt also sets a few more settings: -/// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. +/// This trait needs to be defined on the root structopt of the application. It will provide the +/// implementation name, version, executable name, description, author, support_url, copyright start +/// year and most importantly: how to load the chain spec. /// -/// Gets the struct from the command line arguments. Print the -/// error message and quit the program in case of failure. -pub fn from_args(version: &VersionInfo) -> T -where - T: StructOpt + Sized, -{ - from_iter::(&mut std::env::args_os(), version) -} +/// StructOpt must not be in scope to use from_args (or the similar methods). This trait provides +/// its own implementation that will fill the necessary field based on the trait's functions. +pub trait SubstrateCli: Sized { + /// Implementation name. + fn impl_name() -> &'static str; + + /// Implementation version. + /// + /// By default this will look like this: 2.0.0-b950f731c-x86_64-linux-gnu where the hash is the + /// short commit hash of the commit of in the Git repository. + fn impl_version() -> &'static str; + + /// Executable file name. + fn executable_name() -> &'static str; + + /// Executable file description. + fn description() -> &'static str; + + /// Executable file author. + fn author() -> &'static str; + + /// Support URL. 
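The static metadata that previously travelled in `VersionInfo` is now supplied by the application implementing these associated functions on its root CLI type. A hypothetical sketch of a downstream binary wiring itself to the trait; the struct, strings, and chain-spec handling are invented, only the trait surface comes from the `lib.rs` changes in this PR:

```rust
use sc_cli::SubstrateCli;

/// Invented root CLI type of a downstream node binary.
#[derive(Debug, structopt::StructOpt)]
pub struct Cli {
    #[structopt(flatten)]
    pub run: sc_cli::RunCmd,
}

impl SubstrateCli for Cli {
    fn impl_name() -> &'static str { "Example Node" }
    fn impl_version() -> &'static str { "2.0.0-unknown-x86_64-linux-gnu" }
    fn executable_name() -> &'static str { "example-node" }
    fn description() -> &'static str { "An example Substrate-based node" }
    fn author() -> &'static str { "Example Authors" }
    fn support_url() -> &'static str { "https://example.org/issues" }
    fn copyright_start_year() -> i32 { 2020 }

    fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> {
        // A real node would match on `id` ("", "dev", "local", ...) and return
        // the corresponding chain spec; this sketch only signals the contract.
        Err(format!("no chain spec bundled for `{}` in this example", id))
    }
}

fn main() {
    // `from_args` now comes from SubstrateCli rather than the old free
    // functions; keeping the StructOpt trait out of scope avoids ambiguity.
    let cli = Cli::from_args();
    let _ = cli;
}
```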
+ fn support_url() -> &'static str; + + /// Copyright starting year (x-current year) + fn copyright_start_year() -> i32; + + /// Chain spec factory + fn load_spec(&self, id: &str) -> std::result::Result, String>; + + /// Helper function used to parse the command line arguments. This is the equivalent of + /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of + /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. + /// + /// To allow running the node without subcommand, tt also sets a few more settings: + /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. + /// + /// Gets the struct from the command line arguments. Print the + /// error message and quit the program in case of failure. + fn from_args() -> Self + where + Self: StructOpt + Sized, + { + ::from_iter(&mut std::env::args_os()) + } -/// Helper function used to parse the command line arguments. This is the equivalent of -/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of -/// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. -/// -/// To allow running the node without subcommand, tt also sets a few more settings: -/// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. -/// -/// Gets the struct from any iterator such as a `Vec` of your making. -/// Print the error message and quit the program in case of failure. -pub fn from_iter(iter: I, version: &VersionInfo) -> T -where - T: StructOpt + Sized, - I: IntoIterator, - I::Item: Into + Clone, -{ - let app = T::clap(); - - let mut full_version = sc_service::config::full_version_from_strs( - version.version, - version.commit - ); - full_version.push_str("\n"); - - let app = app - .name(version.executable_name) - .author(version.author) - .about(version.description) - .version(full_version.as_str()) - .settings(&[ - AppSettings::GlobalVersion, - AppSettings::ArgsNegateSubcommands, - AppSettings::SubcommandsNegateReqs, - ]); - - T::from_clap(&app.get_matches_from(iter)) -} + /// Helper function used to parse the command line arguments. This is the equivalent of + /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of + /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. + /// + /// To allow running the node without subcommand, it also sets a few more settings: + /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. + /// + /// Gets the struct from any iterator such as a `Vec` of your making. + /// Print the error message and quit the program in case of failure. + fn from_iter(iter: I) -> Self + where + Self: StructOpt + Sized, + I: IntoIterator, + I::Item: Into + Clone, + { + let app = ::clap(); + + let mut full_version = Self::impl_version().to_string(); + full_version.push_str("\n"); + + let app = app + .name(Self::executable_name()) + .author(Self::author()) + .about(Self::description()) + .version(full_version.as_str()) + .settings(&[ + AppSettings::GlobalVersion, + AppSettings::ArgsNegateSubcommands, + AppSettings::SubcommandsNegateReqs, + ]); + + ::from_clap(&app.get_matches_from(iter)) + } -/// Helper function used to parse the command line arguments. This is the equivalent of -/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of -/// the application, author, "about" and version. 
It will also set `AppSettings::GlobalVersion`. -/// -/// To allow running the node without subcommand, tt also sets a few more settings: -/// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. -/// -/// Gets the struct from any iterator such as a `Vec` of your making. -/// Print the error message and quit the program in case of failure. -/// -/// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are -/// used. It will return a [`clap::Error`], where the [`kind`] is a -/// [`ErrorKind::HelpDisplayed`] or [`ErrorKind::VersionDisplayed`] respectively. You must call -/// [`Error::exit`] or perform a [`std::process::exit`]. -pub fn try_from_iter(iter: I, version: &VersionInfo) -> clap::Result -where - T: StructOpt + Sized, - I: IntoIterator, - I::Item: Into + Clone, -{ - let app = T::clap(); - - let mut full_version = sc_service::config::full_version_from_strs( - version.version, - version.commit, - ); - full_version.push_str("\n"); - - let app = app - .name(version.executable_name) - .author(version.author) - .about(version.description) - .version(full_version.as_str()); - - let matches = app.get_matches_from_safe(iter)?; - - Ok(T::from_clap(&matches)) -} + /// Helper function used to parse the command line arguments. This is the equivalent of + /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of + /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. + /// + /// To allow running the node without subcommand, it also sets a few more settings: + /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. + /// + /// Gets the struct from any iterator such as a `Vec` of your making. + /// Print the error message and quit the program in case of failure. + /// + /// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are + /// used. It will return a [`clap::Error`], where the [`kind`] is a + /// [`ErrorKind::HelpDisplayed`] or [`ErrorKind::VersionDisplayed`] respectively. You must call + /// [`Error::exit`] or perform a [`std::process::exit`]. + fn try_from_iter(iter: I) -> clap::Result + where + Self: StructOpt + Sized, + I: IntoIterator, + I::Item: Into + Clone, + { + let app = ::clap(); + + let mut full_version = Self::impl_version().to_string(); + full_version.push_str("\n"); + + let app = app + .name(Self::executable_name()) + .author(Self::author()) + .about(Self::description()) + .version(full_version.as_str()); + + let matches = app.get_matches_from_safe(iter)?; + + Ok(::from_clap(&matches)) + } -/// Initialize substrate. This must be done only once. -/// -/// This method: -/// -/// 1. Set the panic handler -/// 2. Raise the FD limit -/// 3. 
Initialize the logger -pub fn init(logger_pattern: &str, version: &VersionInfo) -> error::Result<()> { - let full_version = sc_service::config::full_version_from_strs( - version.version, - version.commit - ); - sp_panic_handler::set(version.support_url, &full_version); - - fdlimit::raise_fd_limit(); - init_logger(logger_pattern); + /// Returns the client ID: `{impl_name}/v{impl_version}` + fn client_id() -> String { + format!("{}/v{}", Self::impl_name(), Self::impl_version()) + } - Ok(()) + /// Only create a Configuration for the command provided in argument + fn create_configuration( + &self, + command: &T, + task_executor: Arc + Send>>) + Send + Sync>, + ) -> error::Result { + command.create_configuration(self, task_executor) + } + + /// Create a runner for the command provided in argument. This will create a Configuration and + /// a tokio runtime + fn create_runner(&self, command: &T) -> error::Result> { + command.init::()?; + Runner::new(self, command) + } } /// Initialize the logger @@ -177,17 +215,22 @@ pub fn init_logger(pattern: &str) { builder.format(move |buf, record| { let now = time::now(); let timestamp = - time::strftime("%Y-%m-%d %H:%M:%S", &now) - .expect("Error formatting log timestamp"); + time::strftime("%Y-%m-%d %H:%M:%S", &now).expect("Error formatting log timestamp"); let mut output = if log::max_level() <= log::LevelFilter::Info { - format!("{} {}", Colour::Black.bold().paint(timestamp), record.args()) + format!( + "{} {}", + Colour::Black.bold().paint(timestamp), + record.args(), + ) } else { let name = ::std::thread::current() .name() - .map_or_else(Default::default, |x| format!("{}", Colour::Blue.bold().paint(x))); - let millis = (now.tm_nsec as f32 / 1000000.0).round() as usize; - let timestamp = format!("{}.{:03}", timestamp, millis); + .map_or_else(Default::default, |x| { + format!("{}", Colour::Blue.bold().paint(x)) + }); + let millis = (now.tm_nsec as f32 / 1000000.0).floor() as usize; + let timestamp = format!("{}.{}", timestamp, millis); format!( "{} {} {} {} {}", Colour::Black.bold().paint(timestamp), @@ -211,7 +254,7 @@ pub fn init_logger(pattern: &str) { }); if builder.try_init().is_err() { - info!("Not registering Substrate logger, as there is already a global logger registered!"); + info!("💬 Not registering Substrate logger, as there is already a global logger registered!"); } } @@ -221,3 +264,21 @@ fn kill_color(s: &str) -> String { } RE.replace_all(s, "").to_string() } + +/// Reset the signal pipe (`SIGPIPE`) handler to the default one provided by the system. +/// This will end the program on `SIGPIPE` instead of panicking. +/// +/// This should be called before calling any cli method or printing any output. +pub fn reset_signal_pipe_handler() -> Result<()> { + #[cfg(target_family = "unix")] + { + use nix::sys::signal; + + unsafe { + signal::signal(signal::Signal::SIGPIPE, signal::SigHandler::SigDfl) + .map_err(|e| Error::Other(e.to_string()))?; + } + } + + Ok(()) +} diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index 2b826d69c7fb22269f893a7154b35ce6579e9447..08ca1c8f8ffe073951030feb965e29c616c67cd0 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -14,16 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
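A short usage sketch for the `reset_signal_pipe_handler` helper added to `sc-cli` above. It restores the system default `SIGPIPE` handler so that piping the node's output into something like `head` terminates the process cleanly instead of panicking on a broken pipe; per its doc comment it should run before any CLI parsing or output (it is Unix-only and a no-op elsewhere):

```rust
fn main() -> sc_cli::Result<()> {
    sc_cli::reset_signal_pipe_handler()?;

    // ...parse the CLI and run the node from here on.
    Ok(())
}
```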
-use structopt::StructOpt; -use sc_service::{Configuration, config::DatabaseConfig}; - -use crate::error; use crate::arg_enums::{ - WasmExecutionMethod, TracingReceiver, ExecutionStrategy, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, + ExecutionStrategy, TracingReceiver, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, - DEFAULT_EXECUTION_SYNCING + DEFAULT_EXECUTION_SYNCING, }; use crate::params::PruningParams; +use crate::Result; +use sc_client_api::execution_extensions::ExecutionStrategies; +use sc_service::{PruningMode, Role}; +use structopt::StructOpt; /// Parameters for block import. #[derive(Debug, StructOpt, Clone)] @@ -52,21 +52,21 @@ pub struct ImportParams { #[allow(missing_docs)] #[structopt(flatten)] - pub execution_strategies: ExecutionStrategies, + pub execution_strategies: ExecutionStrategiesParams, /// Limit the memory the database cache can use. - #[structopt(long = "db-cache", value_name = "MiB", default_value = "128")] - pub database_cache_size: u32, + #[structopt(long = "db-cache", value_name = "MiB")] + pub database_cache_size: Option, /// Specify the state cache size. #[structopt(long = "state-cache-size", value_name = "Bytes", default_value = "67108864")] pub state_cache_size: usize, - /// Comma separated list of targets for tracing + /// Comma separated list of targets for tracing. #[structopt(long = "tracing-targets", value_name = "TARGETS")] pub tracing_targets: Option, - /// Receiver to process tracing messages + /// Receiver to process tracing messages. #[structopt( long = "tracing-receiver", value_name = "RECEIVER", @@ -78,25 +78,31 @@ pub struct ImportParams { } impl ImportParams { - /// Put block import CLI params into `config` object. - pub fn update_config( - &self, - mut config: &mut Configuration, - role: sc_service::Roles, - is_dev: bool, - ) -> error::Result<()> { - use sc_client_api::execution_extensions::ExecutionStrategies; - - if let Some(DatabaseConfig::Path { ref mut cache_size, .. }) = config.database { - *cache_size = Some(self.database_cache_size); - } + /// Receiver to process tracing messages. + pub fn tracing_receiver(&self) -> sc_service::TracingReceiver { + self.tracing_receiver.clone().into() + } - config.state_cache_size = self.state_cache_size; + /// Comma separated list of targets for tracing. + pub fn tracing_targets(&self) -> Option { + self.tracing_targets.clone() + } - self.pruning_params.update_config(&mut config, role, self.unsafe_pruning)?; + /// Specify the state cache size. 
+ pub fn state_cache_size(&self) -> usize { + self.state_cache_size + } - config.wasm_method = self.wasm_method.into(); + /// Get the WASM execution method from the parameters + pub fn wasm_method(&self) -> sc_service::config::WasmExecutionMethod { + self.wasm_method.into() + } + /// Get execution strategies for the parameters + pub fn execution_strategies( + &self, + is_dev: bool, + ) -> ExecutionStrategies { let exec = &self.execution_strategies; let exec_all_or = |strat: ExecutionStrategy, default: ExecutionStrategy| { exec.execution.unwrap_or(if strat == default && is_dev { @@ -106,7 +112,7 @@ impl ImportParams { }).into() }; - config.execution_strategies = ExecutionStrategies { + ExecutionStrategies { syncing: exec_all_or(exec.execution_syncing, DEFAULT_EXECUTION_SYNCING), importing: exec_all_or(exec.execution_import_block, DEFAULT_EXECUTION_IMPORT_BLOCK), block_construction: @@ -114,15 +120,23 @@ impl ImportParams { offchain_worker: exec_all_or(exec.execution_offchain_worker, DEFAULT_EXECUTION_OFFCHAIN_WORKER), other: exec_all_or(exec.execution_other, DEFAULT_EXECUTION_OTHER), - }; + } + } - Ok(()) + /// Get the pruning mode from the parameters + pub fn pruning(&self, unsafe_pruning: bool, role: &Role) -> Result { + self.pruning_params.pruning(unsafe_pruning, role) + } + + /// Limit the memory the database cache can use. + pub fn database_cache_size(&self) -> Option { + self.database_cache_size } } /// Execution strategies parameters. #[derive(Debug, StructOpt, Clone)] -pub struct ExecutionStrategies { +pub struct ExecutionStrategiesParams { /// The means of execution used when calling into the runtime while syncing blocks. #[structopt( long = "execution-syncing", diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs new file mode 100644 index 0000000000000000000000000000000000000000..c6131c2f64941afa507931e5e090a4389a1e61eb --- /dev/null +++ b/client/cli/src/params/keystore_params.rs @@ -0,0 +1,92 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::error::Result; +use sc_service::config::KeystoreConfig; +use std::fs; +use std::path::PathBuf; +use structopt::StructOpt; + +/// default sub directory for the key store +const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; + +/// Parameters of the keystore +#[derive(Debug, StructOpt, Clone)] +pub struct KeystoreParams { + /// Specify custom keystore path. + #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] + pub keystore_path: Option, + + /// Use interactive shell for entering the password used by the keystore. + #[structopt( + long = "password-interactive", + conflicts_with_all = &[ "password", "password-filename" ] + )] + pub password_interactive: bool, + + /// Password used by the keystore. 
+ #[structopt( + long = "password", + conflicts_with_all = &[ "password-interactive", "password-filename" ] + )] + pub password: Option, + + /// File that contains the password used by the keystore. + #[structopt( + long = "password-filename", + value_name = "PATH", + parse(from_os_str), + conflicts_with_all = &[ "password-interactive", "password" ] + )] + pub password_filename: Option, +} + +impl KeystoreParams { + /// Get the keystore configuration for the parameters + pub fn keystore_config(&self, base_path: &PathBuf) -> Result { + let password = if self.password_interactive { + #[cfg(not(target_os = "unknown"))] + { + Some(input_keystore_password()?.into()) + } + #[cfg(target_os = "unknown")] + None + } else if let Some(ref file) = self.password_filename { + Some( + fs::read_to_string(file) + .map_err(|e| format!("{}", e))? + .into(), + ) + } else if let Some(ref password) = self.password { + Some(password.clone().into()) + } else { + None + }; + + let path = self + .keystore_path + .clone() + .unwrap_or(base_path.join(DEFAULT_KEYSTORE_CONFIG_PATH)); + + Ok(KeystoreConfig::Path { path, password }) + } +} + +#[cfg(not(target_os = "unknown"))] +fn input_keystore_password() -> Result { + rpassword::read_password_from_tty(Some("Keystore password: ")) + .map_err(|e| format!("{:?}", e).into()) +} diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index f684cab336423159c6a18ff492c3d7741c4cfded..9097bf8589919a25d2a118bab1e1011caa24dda1 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -15,21 +15,23 @@ // along with Substrate. If not, see . mod import_params; -mod transaction_pool_params; -mod shared_params; +mod keystore_params; +mod network_params; mod node_key_params; -mod network_configuration_params; mod pruning_params; +mod shared_params; +mod transaction_pool_params; -use std::str::FromStr; use std::fmt::Debug; +use std::str::FromStr; pub use crate::params::import_params::*; -pub use crate::params::transaction_pool_params::*; -pub use crate::params::shared_params::*; +pub use crate::params::keystore_params::*; +pub use crate::params::network_params::*; pub use crate::params::node_key_params::*; -pub use crate::params::network_configuration_params::*; pub use crate::params::pruning_params::*; +pub use crate::params::shared_params::*; +pub use crate::params::transaction_pool_params::*; /// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a decimal. #[derive(Debug, Clone)] diff --git a/client/cli/src/params/network_configuration_params.rs b/client/cli/src/params/network_params.rs similarity index 55% rename from client/cli/src/params/network_configuration_params.rs rename to client/cli/src/params/network_params.rs index 974fa0be937a70dd117914b04aaaa31031705cf2..21e44f97822862e1d53e0d0f86b2339abc51112e 100644 --- a/client/cli/src/params/network_configuration_params.rs +++ b/client/cli/src/params/network_params.rs @@ -14,28 +14,27 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
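The `keystore_config()` method above checks the three mutually exclusive password sources in a fixed order (they also conflict at the CLI level via `conflicts_with_all`): the interactive prompt, then the password file, then the inline `--password` value, otherwise no password. A simplified sketch of that resolution, with the TTY prompt and the file read replaced by plain values:

fn resolve_password(
    interactive: bool,
    // Contents of --password-filename, already read; None if the flag was not given.
    password_file: Option<String>,
    // Value of --password, if given.
    password: Option<String>,
) -> Option<String> {
    if interactive {
        // The real code prompts on the TTY via rpassword; a fixed value stands in here.
        Some("prompted".to_string())
    } else if let Some(from_file) = password_file {
        Some(from_file)
    } else {
        password
    }
}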
-use std::path::PathBuf; +use crate::params::node_key_params::NodeKeyParams; +use sc_network::{ + config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, TransportConfig}, + multiaddr::Protocol, +}; +use sc_service::{ChainSpec, config::{Multiaddr, MultiaddrWithPeerId}}; use std::iter; use std::net::Ipv4Addr; +use std::path::PathBuf; use structopt::StructOpt; -use sc_network::{ - config::{NonReservedPeerMode, TransportConfig}, multiaddr::Protocol, -}; -use sc_service::Configuration; - -use crate::error; -use crate::params::node_key_params::NodeKeyParams; /// Parameters used to create the network configuration. #[derive(Debug, StructOpt, Clone)] -pub struct NetworkConfigurationParams { +pub struct NetworkParams { /// Specify a list of bootnodes. - #[structopt(long = "bootnodes", value_name = "URL")] - pub bootnodes: Vec, + #[structopt(long = "bootnodes", value_name = "ADDR")] + pub bootnodes: Vec, /// Specify a list of reserved node addresses. - #[structopt(long = "reserved-nodes", value_name = "URL")] - pub reserved_nodes: Vec, + #[structopt(long = "reserved-nodes", value_name = "ADDR")] + pub reserved_nodes: Vec, /// Whether to only allow connections to/from reserved nodes. /// @@ -44,17 +43,9 @@ pub struct NetworkConfigurationParams { #[structopt(long = "reserved-only")] pub reserved_only: bool, - /// Specify a list of sentry node public addresses. - #[structopt( - long = "sentry-nodes", - value_name = "URL", - conflicts_with_all = &[ "sentry" ] - )] - pub sentry_nodes: Vec, - /// Listen on this multiaddress. #[structopt(long = "listen-addr", value_name = "LISTEN_ADDR")] - pub listen_addr: Vec, + pub listen_addr: Vec, /// Specify p2p protocol TCP port. /// @@ -83,11 +74,15 @@ pub struct NetworkConfigurationParams { #[structopt(long = "no-mdns")] pub no_mdns: bool, - /// Maximum number of peers to ask the same blocks in parallel. + /// Maximum number of peers from which to ask for the same blocks in parallel. /// - /// This allows downlading announced blocks from multiple peers. Decrease to save + /// This allows downloading announced blocks from multiple peers. Decrease to save /// traffic and risk increased latency. - #[structopt(long = "max-parallel-downloads", value_name = "COUNT", default_value = "5")] + #[structopt( + long = "max-parallel-downloads", + value_name = "COUNT", + default_value = "5" + )] pub max_parallel_downloads: u32, #[allow(missing_docs)] @@ -99,59 +94,51 @@ pub struct NetworkConfigurationParams { pub use_yamux_flow_control: bool, } -impl NetworkConfigurationParams { +impl NetworkParams { /// Fill the given `NetworkConfiguration` by looking at the cli parameters. 
- pub fn update_config( + pub fn network_config( &self, - mut config: &mut Configuration, - config_path: PathBuf, - client_id: String, + chain_spec: &Box, is_dev: bool, - ) -> error::Result<()> { - config.network.boot_nodes.extend(self.bootnodes.clone()); - config.network.config_path = Some(config_path.clone()); - config.network.net_config_path = Some(config_path.clone()); - - config.network.reserved_nodes.extend(self.reserved_nodes.clone()); - if self.reserved_only { - config.network.non_reserved_mode = NonReservedPeerMode::Deny; - } - - config.network.sentry_nodes.extend(self.sentry_nodes.clone()); - - for addr in self.listen_addr.iter() { - let addr = addr.parse().ok().ok_or(error::Error::InvalidListenMultiaddress)?; - config.network.listen_addresses.push(addr); - } - - if config.network.listen_addresses.is_empty() { - let port = match self.port { - Some(port) => port, - None => 30333, - }; - - config.network.listen_addresses = vec![ - iter::once(Protocol::Ip4(Ipv4Addr::new(0, 0, 0, 0))) - .chain(iter::once(Protocol::Tcp(port))) - .collect() - ]; + net_config_path: &PathBuf, + client_id: &str, + node_name: &str, + node_key: NodeKeyConfig, + ) -> NetworkConfiguration { + let port = self.port.unwrap_or(30333); + let mut listen_addresses = vec![iter::once(Protocol::Ip4(Ipv4Addr::new(0, 0, 0, 0))) + .chain(iter::once(Protocol::Tcp(port))) + .collect()]; + + listen_addresses.extend(self.listen_addr.iter().cloned()); + + let mut boot_nodes = chain_spec.boot_nodes().to_vec(); + boot_nodes.extend(self.bootnodes.clone()); + + NetworkConfiguration { + boot_nodes, + net_config_path: net_config_path.clone(), + reserved_nodes: self.reserved_nodes.clone(), + non_reserved_mode: if self.reserved_only { + NonReservedPeerMode::Deny + } else { + NonReservedPeerMode::Accept + }, + listen_addresses, + public_addresses: Vec::new(), + notifications_protocols: Vec::new(), + node_key, + node_name: node_name.to_string(), + client_version: client_id.to_string(), + in_peers: self.in_peers, + out_peers: self.out_peers, + transport: TransportConfig::Normal { + enable_mdns: !is_dev && !self.no_mdns, + allow_private_ipv4: !self.no_private_ipv4, + wasm_external_transport: None, + use_yamux_flow_control: self.use_yamux_flow_control, + }, + max_parallel_downloads: self.max_parallel_downloads, } - - config.network.client_version = client_id; - self.node_key_params.update_config(&mut config, Some(&config_path))?; - - config.network.in_peers = self.in_peers; - config.network.out_peers = self.out_peers; - - config.network.transport = TransportConfig::Normal { - enable_mdns: !is_dev && !self.no_mdns, - allow_private_ipv4: !self.no_private_ipv4, - wasm_external_transport: None, - use_yamux_flow_control: self.use_yamux_flow_control, - }; - - config.network.max_parallel_downloads = self.max_parallel_downloads; - - Ok(()) } } diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index aef3af94c77f29b7b0401f55133ca621448907aa..2913ff2c1035e2f04ae30888e9f352420c40331d 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -14,14 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
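When no `--listen-addr` is given, the `network_config()` method above still starts the listen list with an address on all IPv4 interfaces, using `--port` or the default 30333, and then appends any explicit `--listen-addr` values. A small sketch of that fallback in textual multiaddr form (the real code assembles it from `Protocol::Ip4` and `Protocol::Tcp` components rather than a string):

fn default_listen_multiaddr(cli_port: Option<u16>) -> String {
    // Mirrors the fallback above: 0.0.0.0 on the requested port, or 30333 by default.
    let port = cli_port.unwrap_or(30333);
    format!("/ip4/0.0.0.0/tcp/{}", port)
}

// e.g. default_listen_multiaddr(None) == "/ip4/0.0.0.0/tcp/30333"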
-use std::{path::PathBuf, str::FromStr}; -use structopt::StructOpt; -use sc_service::Configuration; use sc_network::config::NodeKeyConfig; use sp_core::H256; +use std::{path::PathBuf, str::FromStr}; +use structopt::StructOpt; -use crate::error; use crate::arg_enums::NodeKeyType; +use crate::error; /// The file name of the node's Ed25519 secret key inside the chain-specific /// network config directory, if neither `--node-key` nor `--node-key-file` @@ -38,7 +37,7 @@ pub struct NodeKeyParams { /// `--node-key-type` as follows: /// /// `ed25519`: - /// The value is parsed as a hex-encoded Ed25519 32 bytes secret key, + /// The value is parsed as a hex-encoded Ed25519 32 byte secret key, /// i.e. 64 hex characters. /// /// The value of this option takes precedence over `--node-key-file`. @@ -82,7 +81,7 @@ pub struct NodeKeyParams { /// as follows: /// /// `ed25519`: - /// The file must contain an unencoded 32 bytes Ed25519 secret key. + /// The file must contain an unencoded 32 byte Ed25519 secret key. /// /// If the file does not exist, it is created with a newly generated secret key of /// the chosen type. @@ -93,31 +92,23 @@ pub struct NodeKeyParams { impl NodeKeyParams { /// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context /// of an optional network config storage directory. - pub fn update_config<'a>( - &self, - mut config: &'a mut Configuration, - net_config_path: Option<&PathBuf>, - ) -> error::Result<&'a NodeKeyConfig> { - config.network.node_key = match self.node_key_type { + pub fn node_key(&self, net_config_dir: &PathBuf) -> error::Result { + Ok(match self.node_key_type { NodeKeyType::Ed25519 => { let secret = if let Some(node_key) = self.node_key.as_ref() { parse_ed25519_secret(node_key)? } else { - let path = self.node_key_file.clone() - .or_else(|| net_config_path.map(|d| d.join(NODE_KEY_ED25519_FILE))); + let path = self + .node_key_file + .clone() + .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)); - if let Some(path) = path { - sc_network::config::Secret::File(path) - } else { - sc_network::config::Secret::New - } + sc_network::config::Secret::File(path) }; NodeKeyConfig::Ed25519(secret) } - }; - - Ok(&config.network.node_key) + }) } } @@ -128,114 +119,107 @@ fn invalid_node_key(e: impl std::fmt::Display) -> error::Error { /// Parse a Ed25519 secret key from a hex string into a `sc_network::Secret`. 
fn parse_ed25519_secret(hex: &str) -> error::Result { - H256::from_str(&hex).map_err(invalid_node_key).and_then(|bytes| - sc_network::config::identity::ed25519::SecretKey::from_bytes(bytes) - .map(sc_network::config::Secret::Input) - .map_err(invalid_node_key)) + H256::from_str(&hex) + .map_err(invalid_node_key) + .and_then(|bytes| { + sc_network::config::identity::ed25519::SecretKey::from_bytes(bytes) + .map(sc_network::config::Secret::Input) + .map_err(invalid_node_key) + }) } #[cfg(test)] mod tests { - use sc_network::config::identity::ed25519; use super::*; + use sc_network::config::identity::ed25519; #[test] fn test_node_key_config_input() { - fn secret_input(net_config_dir: Option<&PathBuf>) -> error::Result<()> { + fn secret_input(net_config_dir: &PathBuf) -> error::Result<()> { NodeKeyType::variants().iter().try_for_each(|t| { - let mut config = Configuration::default(); let node_key_type = NodeKeyType::from_str(t).unwrap(); let sk = match node_key_type { - NodeKeyType::Ed25519 => ed25519::SecretKey::generate().as_ref().to_vec() + NodeKeyType::Ed25519 => ed25519::SecretKey::generate().as_ref().to_vec(), }; let params = NodeKeyParams { node_key_type, node_key: Some(format!("{:x}", H256::from_slice(sk.as_ref()))), - node_key_file: None + node_key_file: None, }; - params.update_config(&mut config, net_config_dir).and_then(|c| match c { + params.node_key(net_config_dir).and_then(|c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) - if node_key_type == NodeKeyType::Ed25519 && - &sk[..] == ski.as_ref() => Ok(()), - _ => Err(error::Error::Input("Unexpected node key config".into())) + if node_key_type == NodeKeyType::Ed25519 && &sk[..] == ski.as_ref() => + { + Ok(()) + } + _ => Err(error::Error::Input("Unexpected node key config".into())), }) }) } - assert!(secret_input(None).is_ok()); - assert!(secret_input(Some(&PathBuf::from_str("x").unwrap())).is_ok()); + assert!(secret_input(&PathBuf::from_str("x").unwrap()).is_ok()); } #[test] fn test_node_key_config_file() { - fn secret_file(net_config_dir: Option<&PathBuf>) -> error::Result<()> { + fn secret_file(net_config_dir: &PathBuf) -> error::Result<()> { NodeKeyType::variants().iter().try_for_each(|t| { - let mut config = Configuration::default(); let node_key_type = NodeKeyType::from_str(t).unwrap(); let tmp = tempfile::Builder::new().prefix("alice").tempdir()?; let file = tmp.path().join(format!("{}_mysecret", t)).to_path_buf(); let params = NodeKeyParams { node_key_type, node_key: None, - node_key_file: Some(file.clone()) + node_key_file: Some(file.clone()), }; - params.update_config(&mut config, net_config_dir).and_then(|c| match c { + params.node_key(net_config_dir).and_then(|c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if node_key_type == NodeKeyType::Ed25519 && f == &file => Ok(()), - _ => Err(error::Error::Input("Unexpected node key config".into())) + if node_key_type == NodeKeyType::Ed25519 && f == &file => + { + Ok(()) + } + _ => Err(error::Error::Input("Unexpected node key config".into())), }) }) } - assert!(secret_file(None).is_ok()); - assert!(secret_file(Some(&PathBuf::from_str("x").unwrap())).is_ok()); + assert!(secret_file(&PathBuf::from_str("x").unwrap()).is_ok()); } #[test] fn test_node_key_config_default() { fn with_def_params(f: F) -> error::Result<()> where - F: Fn(NodeKeyParams) -> error::Result<()> + F: Fn(NodeKeyParams) -> error::Result<()>, { NodeKeyType::variants().iter().try_for_each(|t| { let node_key_type = NodeKeyType::from_str(t).unwrap(); 
f(NodeKeyParams { node_key_type, node_key: None, - node_key_file: None + node_key_file: None, }) }) } - fn no_config_dir() -> error::Result<()> { - with_def_params(|params| { - let mut config = Configuration::default(); - let typ = params.node_key_type; - params.update_config(&mut config, None) - .and_then(|c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::New) - if typ == NodeKeyType::Ed25519 => Ok(()), - _ => Err(error::Error::Input("Unexpected node key config".into())) - }) - }) - } - fn some_config_dir(net_config_dir: &PathBuf) -> error::Result<()> { with_def_params(|params| { - let mut config = Configuration::default(); let dir = PathBuf::from(net_config_dir.clone()); let typ = params.node_key_type; - params.update_config(&mut config, Some(net_config_dir)) + params + .node_key(net_config_dir) .and_then(move |c| match c { NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if typ == NodeKeyType::Ed25519 && - f == &dir.join(NODE_KEY_ED25519_FILE) => Ok(()), - _ => Err(error::Error::Input("Unexpected node key config".into())) - }) + if typ == NodeKeyType::Ed25519 + && f == &dir.join(NODE_KEY_ED25519_FILE) => + { + Ok(()) + } + _ => Err(error::Error::Input("Unexpected node key config".into())), + }) }) } - assert!(no_config_dir().is_ok()); assert!(some_config_dir(&PathBuf::from_str("x").unwrap()).is_ok()); } } diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index ec1066df953fdc01538e089af632aeaa128358d5..ed8f7ab16858dce49f0b8a78ae2bb60141a47eac 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use structopt::StructOpt; -use sc_service::{Configuration, PruningMode}; - use crate::error; +use sc_service::{PruningMode, Role}; +use structopt::StructOpt; /// Parameters to define the pruning mode #[derive(Debug, StructOpt, Clone)] @@ -32,35 +31,29 @@ pub struct PruningParams { } impl PruningParams { - /// Put block pruning CLI params into `config` object. - pub fn update_config( - &self, - mut config: &mut Configuration, - role: sc_service::Roles, - unsafe_pruning: bool, - ) -> error::Result<()> { + /// Get the pruning value from the parameters + pub fn pruning(&self, unsafe_pruning: bool, role: &Role) -> error::Result { // by default we disable pruning if the node is an authority (i.e. // `ArchiveAll`), otherwise we keep state for the last 256 blocks. if the // node is an authority and pruning is enabled explicitly, then we error // unless `unsafe_pruning` is set. - config.pruning = match &self.pruning { + Ok(match &self.pruning { Some(ref s) if s == "archive" => PruningMode::ArchiveAll, - None if role == sc_service::Roles::AUTHORITY => PruningMode::ArchiveAll, + None if role.is_network_authority() => PruningMode::ArchiveAll, None => PruningMode::default(), Some(s) => { - if role == sc_service::Roles::AUTHORITY && !unsafe_pruning { + if role.is_network_authority() && !unsafe_pruning { return Err(error::Error::Input( "Validators should run with state pruning disabled (i.e. archive). \ - You can ignore this check with `--unsafe-pruning`.".to_string() + You can ignore this check with `--unsafe-pruning`." + .to_string(), )); } - PruningMode::keep_blocks(s.parse() - .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string()))? 
- ) - }, - }; - - Ok(()) + PruningMode::keep_blocks(s.parse().map_err(|_| { + error::Error::Input("Invalid pruning mode specified".to_string()) + })?) + } + }) } } diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index 310e3de5da8d0c54b58bc478667aa273b93df72b..f7f9db102c714dfb5e33a8ed54468f7dccee0fe3 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -14,23 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use sc_service::config::DatabaseConfig; use std::path::PathBuf; use structopt::StructOpt; -use app_dirs::{AppInfo, AppDataType}; -use sc_service::{ - Configuration, config::DatabaseConfig, ChainSpec, -}; - -use crate::VersionInfo; -use crate::error; /// default sub directory to store database -const DEFAULT_DB_CONFIG_PATH : &'static str = "db"; +const DEFAULT_DB_CONFIG_PATH: &'static str = "db"; /// Shared parameters used by all `CoreParams`. #[derive(Debug, StructOpt, Clone)] pub struct SharedParams { - /// Specify the chain specification (one of dev, local or staging). + /// Specify the chain specification (one of dev, local, or staging). #[structopt(long = "chain", value_name = "CHAIN_SPEC")] pub chain: Option, @@ -39,71 +33,61 @@ pub struct SharedParams { pub dev: bool, /// Specify custom base path. - #[structopt(long = "base-path", short = "d", value_name = "PATH", parse(from_os_str))] + #[structopt( + long = "base-path", + short = "d", + value_name = "PATH", + parse(from_os_str) + )] pub base_path: Option, - /// Sets a custom logging filter. + /// Sets a custom logging filter. Syntax is =, e.g. -lsync=debug. + /// + /// Log levels (least to most verbose) are error, warn, info, debug, and trace. + /// By default, all targets log `info`. The global log level can be set with -l. #[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")] pub log: Option, } impl SharedParams { - /// Load spec to `Configuration` from `SharedParams` and spec factory. - pub fn update_config<'a, F>( - &self, - mut config: &'a mut Configuration, - spec_factory: F, - version: &VersionInfo, - ) -> error::Result<&'a dyn ChainSpec> where - F: FnOnce(&str) -> Result, String>, - { - let chain_key = match self.chain { - Some(ref chain) => chain.clone(), - None => if self.dev { "dev".into() } else { "".into() } - }; - let spec = spec_factory(&chain_key)?; - config.network.boot_nodes = spec.boot_nodes().to_vec(); - config.telemetry_endpoints = spec.telemetry_endpoints().clone(); + /// Specify custom base path. + pub fn base_path(&self) -> Option { + self.base_path.clone() + } - config.chain_spec = Some(spec); + /// Specify the development chain. 
+ pub fn is_dev(&self) -> bool { + self.dev + } - if config.config_dir.is_none() { - config.config_dir = Some(base_path(self, version)); + /// Get the chain spec for the parameters provided + pub fn chain_id(&self, is_dev: bool) -> String { + match self.chain { + Some(ref chain) => chain.clone(), + None => { + if is_dev { + "dev".into() + } else { + "".into() + } + } } + } - if config.database.is_none() { - config.database = Some(DatabaseConfig::Path { - path: config - .in_chain_config_dir(DEFAULT_DB_CONFIG_PATH) - .expect("We provided a base_path/config_dir."), - cache_size: None, - }); + /// Get the database configuration object for the parameters provided + pub fn database_config( + &self, + base_path: &PathBuf, + cache_size: usize, + ) -> DatabaseConfig { + DatabaseConfig::Path { + path: base_path.join(DEFAULT_DB_CONFIG_PATH), + cache_size, } - - Ok(config.expect_chain_spec()) } - /// Initialize substrate. This must be done only once. - /// - /// This method: - /// - /// 1. Set the panic handler - /// 2. Raise the FD limit - /// 3. Initialize the logger - pub fn init(&self, version: &VersionInfo) -> error::Result<()> { - crate::init(self.log.as_ref().map(|v| v.as_ref()).unwrap_or(""), version) + /// Get the filters for the logging + pub fn log_filters(&self) -> Option { + self.log.clone() } } - -fn base_path(cli: &SharedParams, version: &VersionInfo) -> PathBuf { - cli.base_path.clone() - .unwrap_or_else(|| - app_dirs::get_app_root( - AppDataType::UserData, - &AppInfo { - name: version.executable_name, - author: version.author - } - ).expect("app directories exist on all supported platforms; qed") - ) -} diff --git a/client/cli/src/params/transaction_pool_params.rs b/client/cli/src/params/transaction_pool_params.rs index 3468c12243b3fe75d872891aebf12b3df6f0e356..dfcdf9af70537e51586654e4cc72da4a7549f118 100644 --- a/client/cli/src/params/transaction_pool_params.rs +++ b/client/cli/src/params/transaction_pool_params.rs @@ -14,9 +14,8 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use sc_service::config::TransactionPoolOptions; use structopt::StructOpt; -use sc_service::Configuration; -use crate::error; /// Parameters used to create the pool configuration. #[derive(Debug, StructOpt, Clone)] @@ -24,6 +23,7 @@ pub struct TransactionPoolParams { /// Maximum number of transactions in the transaction pool. #[structopt(long = "pool-limit", value_name = "COUNT", default_value = "8192")] pub pool_limit: usize, + /// Maximum number of kilobytes of all transactions stored in the pool. #[structopt(long = "pool-kbytes", value_name = "COUNT", default_value = "20480")] pub pool_kbytes: usize, @@ -31,19 +31,18 @@ pub struct TransactionPoolParams { impl TransactionPoolParams { /// Fill the given `PoolConfiguration` by looking at the cli parameters. 
- pub fn update_config( - &self, - config: &mut Configuration, - ) -> error::Result<()> { + pub fn transaction_pool(&self) -> TransactionPoolOptions { + let mut opts = TransactionPoolOptions::default(); + // ready queue - config.transaction_pool.ready.count = self.pool_limit; - config.transaction_pool.ready.total_bytes = self.pool_kbytes * 1024; + opts.ready.count = self.pool_limit; + opts.ready.total_bytes = self.pool_kbytes * 1024; // future queue let factor = 10; - config.transaction_pool.future.count = self.pool_limit / factor; - config.transaction_pool.future.total_bytes = self.pool_kbytes * 1024 / factor; + opts.future.count = self.pool_limit / factor; + opts.future.total_bytes = self.pool_kbytes * 1024 / factor; - Ok(()) + opts } } diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs new file mode 100644 index 0000000000000000000000000000000000000000..bd5dc7100ef02a58855032d876108a0a1573e6c0 --- /dev/null +++ b/client/cli/src/runner.rs @@ -0,0 +1,237 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::CliConfiguration; +use crate::Result; +use crate::SubstrateCli; +use crate::Subcommand; +use chrono::prelude::*; +use futures::pin_mut; +use futures::select; +use futures::{future, future::FutureExt, Future}; +use log::info; +use sc_service::{AbstractService, Configuration, Role, ServiceBuilderCommand}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_utils::metrics::{TOKIO_THREADS_ALIVE, TOKIO_THREADS_TOTAL}; +use std::fmt::Debug; +use std::marker::PhantomData; +use std::sync::Arc; + +#[cfg(target_family = "unix")] +async fn main(func: F) -> std::result::Result<(), Box> +where + F: Future> + future::FusedFuture, + E: 'static + std::error::Error, +{ + use tokio::signal::unix::{signal, SignalKind}; + + let mut stream_int = signal(SignalKind::interrupt())?; + let mut stream_term = signal(SignalKind::terminate())?; + + let t1 = stream_int.recv().fuse(); + let t2 = stream_term.recv().fuse(); + let t3 = func; + + pin_mut!(t1, t2, t3); + + select! { + _ = t1 => {}, + _ = t2 => {}, + res = t3 => res?, + } + + Ok(()) +} + +#[cfg(not(unix))] +async fn main(func: F) -> std::result::Result<(), Box> +where + F: Future> + future::FusedFuture, + E: 'static + std::error::Error, +{ + use tokio::signal::ctrl_c; + + let t1 = ctrl_c().fuse(); + let t2 = func; + + pin_mut!(t1, t2); + + select! 
{ + _ = t1 => {}, + res = t2 => res?, + } + + Ok(()) +} + +/// Build a tokio runtime with all features +pub fn build_runtime() -> std::result::Result { + tokio::runtime::Builder::new() + .thread_name("main-tokio-") + .threaded_scheduler() + .on_thread_start(||{ + TOKIO_THREADS_ALIVE.inc(); + TOKIO_THREADS_TOTAL.inc(); + }) + .on_thread_stop(||{ + TOKIO_THREADS_ALIVE.dec(); + }) + .enable_all() + .build() +} + +fn run_until_exit(mut tokio_runtime: tokio::runtime::Runtime, future: FUT) -> Result<()> +where + FUT: Future> + future::Future, + ERR: 'static + std::error::Error, +{ + let f = future.fuse(); + pin_mut!(f); + + tokio_runtime.block_on(main(f)).map_err(|e| e.to_string())?; + + Ok(()) +} + +/// A Substrate CLI runtime that can be used to run a node or a command +pub struct Runner { + config: Configuration, + tokio_runtime: tokio::runtime::Runtime, + phantom: PhantomData, +} + +impl Runner { + /// Create a new runtime with the command provided in argument + pub fn new(cli: &C, command: &T) -> Result> { + let tokio_runtime = build_runtime()?; + + let task_executor = { + let runtime_handle = tokio_runtime.handle().clone(); + Arc::new(move |fut| { + runtime_handle.spawn(fut); + }) + }; + + Ok(Runner { + config: command.create_configuration(cli, task_executor)?, + tokio_runtime, + phantom: PhantomData, + }) + } + + /// A helper function that runs an `AbstractService` with tokio and stops if the process receives + /// the signal `SIGTERM` or `SIGINT`. + pub fn run_node(self, new_light: FNL, new_full: FNF) -> Result<()> + where + FNL: FnOnce(Configuration) -> sc_service::error::Result, + FNF: FnOnce(Configuration) -> sc_service::error::Result, + SL: AbstractService + Unpin, + SF: AbstractService + Unpin, + { + info!("{}", C::impl_name()); + info!("✌️ version {}", C::impl_version()); + info!( + "❤️ by {}, {}-{}", + C::author(), + C::copyright_start_year(), + Local::today().year(), + ); + info!("📋 Chain specification: {}", self.config.chain_spec.name()); + info!("🏷 Node name: {}", self.config.network.node_name); + info!("👤 Role: {}", self.config.display_role()); + + match self.config.role { + Role::Light => self.run_service_until_exit(new_light), + _ => self.run_service_until_exit(new_full), + } + } + + /// A helper function that runs a future with tokio and stops if the process receives the signal + /// `SIGTERM` or `SIGINT`. 
+ pub fn run_subcommand(self, subcommand: &Subcommand, builder: B) -> Result<()> + where + B: FnOnce(Configuration) -> sc_service::error::Result, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: Debug, + ::Hash: std::str::FromStr, + { + match subcommand { + Subcommand::BuildSpec(cmd) => cmd.run(self.config), + Subcommand::ExportBlocks(cmd) => { + run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + } + Subcommand::ImportBlocks(cmd) => { + run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + } + Subcommand::CheckBlock(cmd) => { + run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + } + Subcommand::Revert(cmd) => cmd.run(self.config, builder), + Subcommand::PurgeChain(cmd) => cmd.run(self.config), + } + } + + fn run_service_until_exit(mut self, service_builder: F) -> Result<()> + where + F: FnOnce(Configuration) -> std::result::Result, + T: AbstractService + Unpin, + { + let service = service_builder(self.config)?; + + let informant_future = sc_informant::build(&service, sc_informant::OutputFormat::Coloured); + let _informant_handle = self.tokio_runtime.spawn(informant_future); + + // we eagerly drop the service so that the internal exit future is fired, + // but we need to keep holding a reference to the global telemetry guard + // and drop the runtime first. + let _telemetry = service.telemetry(); + + let f = service.fuse(); + pin_mut!(f); + + self.tokio_runtime + .block_on(main(f)) + .map_err(|e| e.to_string())?; + drop(self.tokio_runtime); + + Ok(()) + } + + /// A helper function that runs a command with the configuration of this node + pub fn sync_run(self, runner: impl FnOnce(Configuration) -> Result<()>) -> Result<()> { + runner(self.config) + } + + /// A helper function that runs a future with tokio and stops if the process receives + /// the signal SIGTERM or SIGINT + pub fn async_run(self, runner: impl FnOnce(Configuration) -> FUT) -> Result<()> + where + FUT: Future>, + { + run_until_exit(self.tokio_runtime, runner(self.config)) + } + + /// Get an immutable reference to the node Configuration + pub fn config(&self) -> &Configuration { + &self.config + } + + /// Get a mutable reference to the node Configuration + pub fn config_mut(&mut self) -> &Configuration { + &mut self.config + } +} diff --git a/client/cli/src/runtime.rs b/client/cli/src/runtime.rs deleted file mode 100644 index 183196139ffb05953b010e4a7028cced82cb3dba..0000000000000000000000000000000000000000 --- a/client/cli/src/runtime.rs +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . 
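Both `run_until_exit` and `run_service_until_exit` above rest on the same idea, implemented by the private `main()` helper: race the node's future against the process signals and return as soon as either side finishes. A simplified sketch of that pattern with the same `futures` 0.3 combinators, where an arbitrary shutdown future stands in for the Unix SIGINT/SIGTERM streams (or `ctrl_c` on other platforms):

use futures::{future::FutureExt, pin_mut, select};
use std::future::Future;

// Resolve `work` unless `shutdown` completes first.
async fn race_against_shutdown<W, S, T>(work: W, shutdown: S) -> Option<T>
where
    W: Future<Output = T>,
    S: Future<Output = ()>,
{
    let work = work.fuse();
    let shutdown = shutdown.fuse();
    pin_mut!(work, shutdown);
    select! {
        res = work => Some(res),
        _ = shutdown => None,
    }
}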
- -use std::sync::Arc; - -use futures::{Future, future, future::FutureExt}; -use futures::select; -use futures::pin_mut; -use sc_service::{AbstractService, Configuration}; -use crate::error; - -#[cfg(target_family = "unix")] -async fn main(func: F) -> Result<(), Box> -where - F: Future> + future::FusedFuture, - E: 'static + std::error::Error, -{ - use tokio::signal::unix::{signal, SignalKind}; - - let mut stream_int = signal(SignalKind::interrupt())?; - let mut stream_term = signal(SignalKind::terminate())?; - - let t1 = stream_int.recv().fuse(); - let t2 = stream_term.recv().fuse(); - let t3 = func; - - pin_mut!(t1, t2, t3); - - select! { - _ = t1 => {}, - _ = t2 => {}, - res = t3 => res?, - } - - Ok(()) -} - -#[cfg(not(unix))] -async fn main(func: F) -> Result<(), Box> -where - F: Future> + future::FusedFuture, - E: 'static + std::error::Error, -{ - use tokio::signal::ctrl_c; - - let t1 = ctrl_c().fuse(); - let t2 = func; - - pin_mut!(t1, t2); - - select! { - _ = t1 => {}, - res = t2 => res?, - } - - Ok(()) -} - -fn build_runtime() -> Result { - tokio::runtime::Builder::new() - .thread_name("main-tokio-") - .threaded_scheduler() - .enable_all() - .build() -} - -/// A helper function that runs a future with tokio and stops if the process receives the signal -/// SIGTERM or SIGINT -pub fn run_until_exit( - mut config: Configuration, - future_builder: F, -) -> error::Result<()> -where - F: FnOnce(Configuration) -> error::Result, - FUT: Future> + future::Future, - ERR: 'static + std::error::Error, -{ - let mut runtime = build_runtime()?; - - config.task_executor = { - let runtime_handle = runtime.handle().clone(); - Some(Arc::new(move |fut| { runtime_handle.spawn(fut); })) - }; - - let f = future_builder(config)?; - let f = f.fuse(); - pin_mut!(f); - - runtime.block_on(main(f)).map_err(|e| e.to_string())?; - - Ok(()) -} - -/// A helper function that runs an `AbstractService` with tokio and stops if the process receives -/// the signal SIGTERM or SIGINT -pub fn run_service_until_exit( - mut config: Configuration, - service_builder: F, -) -> error::Result<()> -where - F: FnOnce(Configuration) -> Result, - T: AbstractService + Unpin, -{ - let mut runtime = build_runtime()?; - - config.task_executor = { - let runtime_handle = runtime.handle().clone(); - Some(Arc::new(move |fut| { runtime_handle.spawn(fut); })) - }; - - let service = service_builder(config)?; - - let informant_future = sc_informant::build(&service, sc_informant::OutputFormat::Coloured); - let _informant_handle = runtime.spawn(informant_future); - - // we eagerly drop the service so that the internal exit future is fired, - // but we need to keep holding a reference to the global telemetry guard - // and drop the runtime first. 
- let _telemetry = service.telemetry(); - - let f = service.fuse(); - pin_mut!(f); - - runtime.block_on(main(f)).map_err(|e| e.to_string())?; - drop(runtime); - - Ok(()) -} diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index f0b90f381932b6b34721bbc33764c3a5f57d741f..f0fe368acbb037f0b9181263889baa31d3e3f660 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-aura" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Aura consensus algorithm for substrate" edition = "2018" @@ -9,37 +9,40 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/aura" } -sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } -sc-block-builder = { version = "0.8.0-alpha.3", path = "../../../client/block-builder" } -sc-client = { version = "0.8.0-alpha.2", path = "../../" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } -codec = { package = "parity-scale-codec", version = "1.2.0" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sp-application-crypto = { version = "2.0.0-alpha.5", path = "../../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/aura" } +sp-block-builder = { version = "2.0.0-alpha.5", path = "../../../primitives/block-builder" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../../../client/block-builder" } +sc-client = { version = "0.8.0-alpha.5", path = "../../" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../api" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } derive_more = "0.99.2" -futures = "0.3.1" +futures = "0.3.4" futures-timer = "3.0.1" -sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } -sc-keystore = { version = "2.0.0-alpha.2", path = "../../keystore" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../../primitives/inherents" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../../keystore" } log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } -sp-version = { version = "2.0.0-alpha.2", path = "../../../primitives/version" } -sc-consensus-slots = { version = "0.8.0-alpha.2", path = "../slots" } -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0-alpha.2", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../../telemetry" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sp-io = { version = "2.0.0-alpha.5", path = "../../../primitives/io" } +sp-version = { version = "2.0.0-alpha.5", path = "../../../primitives/version" } 
+sc-consensus-slots = { version = "0.8.0-alpha.5", path = "../slots" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0-alpha.5", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../../telemetry" } [dev-dependencies] -sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8.0-alpha.2", path = "../../executor" } -sc-network = { version = "0.8.0-alpha.2", path = "../../network" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8.0-alpha.5", path = "../../executor" } +sc-network = { version = "0.8.0-alpha.5", path = "../../network" } sc-network-test = { version = "0.8.0-dev", path = "../../network/test" } -sc-service = { version = "0.8.0-alpha.2", path = "../../service" } +sc-service = { version = "0.8.0-alpha.5", path = "../../service" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } env_logger = "0.7.0" tempfile = "3.1.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 6bb87acd0e81cf320f09f3326f0fa0c0954e1f28..56674546d372bb67ed8b44ef0ece086e1febb653 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -230,8 +230,8 @@ impl sc_consensus_slots::SimpleSlotWorker for AuraW authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) } - fn authorities_len(&self, epoch_data: &Self::EpochData) -> usize { - epoch_data.len() + fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option { + Some(epoch_data.len()) } fn claim_slot( diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 66455adbdf8284a822a98a4bf4a2314964445fe5..19c5bf9e588b3d51f933855ddd01e1a80cd6eae9 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "BABE consensus algorithm for substrate" edition = "2018" @@ -9,53 +9,56 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sc-consensus-babe" - [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } -sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/babe" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +sp-consensus-babe = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/babe" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-alpha.5", path = "../../../primitives/application-crypto" } num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" serde = { version = "1.0.104", features = ["derive"] } -sp-version = { version = "2.0.0-alpha.2", path = "../../../primitives/version" } -sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } -sp-inherents = { version = "2.0.0-alpha.2", path 
= "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0-alpha.2", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../../telemetry" } -sc-keystore = { version = "2.0.0-alpha.2", path = "../../keystore" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } -sc-client = { version = "0.8.0-alpha.2", path = "../../" } -sc-consensus-epochs = { version = "0.8.0-alpha.2", path = "../epochs" } -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } -sc-consensus-uncles = { version = "0.8.0-alpha.2", path = "../uncles" } -sc-consensus-slots = { version = "0.8.0-alpha.2", path = "../slots" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -fork-tree = { version = "2.0.0-alpha.2", path = "../../../utils/fork-tree" } -futures = "0.3.1" +sp-version = { version = "2.0.0-alpha.5", path = "../../../primitives/version" } +sp-io = { version = "2.0.0-alpha.5", path = "../../../primitives/io" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../../primitives/inherents" } +sp-timestamp = { version = "2.0.0-alpha.5", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../../telemetry" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../../keystore" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../api" } +sc-client = { version = "0.8.0-alpha.5", path = "../../" } +sc-consensus-epochs = { version = "0.8.0-alpha.5", path = "../epochs" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +sp-block-builder = { version = "2.0.0-alpha.5", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } +sp-consensus-vrf = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/vrf" } +sc-consensus-uncles = { version = "0.8.0-alpha.5", path = "../uncles" } +sc-consensus-slots = { version = "0.8.0-alpha.5", path = "../slots" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +fork-tree = { version = "2.0.0-alpha.5", path = "../../../utils/fork-tree" } +futures = "0.3.4" futures-timer = "3.0.1" parking_lot = "0.10.0" log = "0.4.8" -schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"] } +schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"] } rand = "0.7.2" -merlin = "1.2.1" +merlin = "2.0" pdqselect = "0.1.0" derive_more = "0.99.2" [dev-dependencies] -sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8.0-alpha.2", path = "../../executor" } -sc-network = { version = "0.8.0-alpha.2", path = "../../network" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8.0-alpha.5", path = "../../executor" } +sc-network = { version = "0.8.0-alpha.5", path = "../../network" } sc-network-test = { version = "0.8.0-dev", path = "../../network/test" } -sc-service = { version = "0.8.0-alpha.2", path = "../../service" } +sc-service = { version = "0.8.0-alpha.5", path = "../../service" } 
substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8.0-alpha.2", path = "../../block-builder" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../../block-builder" } env_logger = "0.7.0" tempfile = "3.1.0" [features] test-helpers = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 3f2cc4f0bcbce8913ca9183815edd5cffe39928f..6bdaeb5ddf0709a336b0bccf6d30698ba810d2dd 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-babe-rpc" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "RPC extensions for the BABE consensus algorithm" edition = "2018" @@ -9,24 +9,27 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sc-consensus-babe = { version = "0.8.0-alpha.2", path = "../" } +sc-consensus-babe = { version = "0.8.0-alpha.5", path = "../" } jsonrpc-core = "14.0.3" -jsonrpc-core-client = "14.0.3" +jsonrpc-core-client = "14.0.5" jsonrpc-derive = "14.0.3" -sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../../primitives/consensus/babe" } +sp-consensus-babe = { version = "0.8.0-alpha.5", path = "../../../../primitives/consensus/babe" } serde = { version = "1.0.104", features=["derive"] } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../../primitives/runtime" } -sc-consensus-epochs = { version = "0.8.0-alpha.2", path = "../../epochs" } -futures = "0.3.1" +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../../primitives/runtime" } +sc-consensus-epochs = { version = "0.8.0-alpha.5", path = "../../epochs" } +futures = "0.3.4" derive_more = "0.99.2" -sp-api = { version = "2.0.0-alpha.2", path = "../../../../primitives/api" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../../primitives/consensus/common" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../../primitives/core" } -sc-keystore = { version = "2.0.0-alpha.2", path = "../../../keystore" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../../primitives/api" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../../primitives/consensus/common" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../../primitives/core" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../../../keystore" } [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../../test-utils/runtime/client" } -sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../../../primitives/application-crypto" } -sp-keyring = { version = "2.0.0-alpha.2", path = "../../../../primitives/keyring" } +sp-application-crypto = { version = "2.0.0-alpha.5", path = "../../../../primitives/application-crypto" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../../../primitives/keyring" } tempfile = "3.1.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index ddf6375880cb632f72f24019b1232c3201f8b1a8..cb78504b1f78882395f5273afbfc5adb2a5c45a3 100644 --- 
a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -179,7 +179,7 @@ fn epoch_data( SC: SelectChain, { let parent = select_chain.best_chain()?; - epoch_changes.lock().epoch_for_child_of( + epoch_changes.lock().epoch_data_for_child_of( descendent_query(&**client), &parent.hash(), parent.number().clone(), @@ -187,7 +187,6 @@ fn epoch_data( |slot| babe_config.genesis_epoch(slot), ) .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(format!("{:?}", e))))? - .map(|e| e.into_inner()) .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) } diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index a01ea63bbe111ef193d65de6a4264d7eb8a5ede4..074e582bff252395a18e691211eda944b380c723 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -21,7 +21,8 @@ use sp_consensus_babe::{ AuthorityId, BabeAuthorityWeight, BABE_ENGINE_ID, BABE_VRF_PREFIX, SlotNumber, AuthorityPair, BabeConfiguration }; -use sp_consensus_babe::digests::PreDigest; +use sp_consensus_babe::digests::{PreDigest, PrimaryPreDigest, SecondaryPreDigest}; +use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; use sp_core::{U256, blake2_256}; use codec::Encode; use schnorrkel::vrf::VRFInOut; @@ -127,10 +128,10 @@ fn claim_secondary_slot( }) { if pair.public() == *expected_author { - let pre_digest = PreDigest::Secondary { + let pre_digest = PreDigest::Secondary(SecondaryPreDigest { slot_number, authority_index: authority_index as u32, - }; + }); return Some((pre_digest, pair)); } @@ -199,12 +200,12 @@ fn claim_primary_slot( let pre_digest = get_keypair(&pair) .vrf_sign_after_check(transcript, |inout| super::authorship::check_primary_threshold(inout, threshold)) .map(|s| { - PreDigest::Primary { + PreDigest::Primary(PrimaryPreDigest { slot_number, - vrf_output: s.0.to_output(), - vrf_proof: s.1, + vrf_output: VRFOutput(s.0.to_output()), + vrf_proof: VRFProof(s.1), authority_index: authority_index as u32, - } + }) }); // early exit on first successful claim diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index 2f64157f22951dd79a8a6c0d4d238e4eccbf1f42..e014c8975acee02b1efddae059345b08cd395d6c 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -25,10 +25,12 @@ use sc_client_api::backend::AuxStore; use sp_blockchain::{Result as ClientResult, Error as ClientError}; use sp_runtime::traits::Block as BlockT; use sp_consensus_babe::BabeBlockWeight; -use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges}; +use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges, migration::EpochChangesForV0}; use crate::Epoch; -const BABE_EPOCH_CHANGES: &[u8] = b"babe_epoch_changes"; +const BABE_EPOCH_CHANGES_VERSION: &[u8] = b"babe_epoch_changes_version"; +const BABE_EPOCH_CHANGES_KEY: &[u8] = b"babe_epoch_changes"; +const BABE_EPOCH_CHANGES_CURRENT_VERSION: u32 = 1; fn block_weight_key(block_hash: H) -> Vec { (b"block_weight", block_hash).encode() @@ -52,14 +54,30 @@ fn load_decode(backend: &B, key: &[u8]) -> ClientResult> pub(crate) fn load_epoch_changes( backend: &B, ) -> ClientResult> { - let epoch_changes = load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES)? - .map(|v| Arc::new(Mutex::new(v))) - .unwrap_or_else(|| { - info!(target: "babe", - "Creating empty BABE epoch changes on what appears to be first startup." 
- ); - SharedEpochChanges::::default() - }); + let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; + + let maybe_epoch_changes = match version { + None => load_decode::<_, EpochChangesForV0>( + backend, + BABE_EPOCH_CHANGES_KEY, + )?.map(|v0| v0.migrate()), + Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => load_decode::<_, EpochChangesFor>( + backend, + BABE_EPOCH_CHANGES_KEY, + )?, + Some(other) => { + return Err(ClientError::Backend( + format!("Unsupported BABE DB version: {:?}", other) + )) + }, + }; + + let epoch_changes = Arc::new(Mutex::new(maybe_epoch_changes.unwrap_or_else(|| { + info!(target: "babe", + "👶 Creating empty BABE epoch changes on what appears to be first startup." + ); + EpochChangesFor::::default() + }))); // rebalance the tree after deserialization. this isn't strictly necessary // since the tree is now rebalanced on every update operation. but since the @@ -77,10 +95,13 @@ pub(crate) fn write_epoch_changes( ) -> R where F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { - let encoded_epoch_changes = epoch_changes.encode(); - write_aux( - &[(BABE_EPOCH_CHANGES, encoded_epoch_changes.as_slice())], - ) + BABE_EPOCH_CHANGES_CURRENT_VERSION.using_encoded(|version| { + let encoded_epoch_changes = epoch_changes.encode(); + write_aux( + &[(BABE_EPOCH_CHANGES_KEY, encoded_epoch_changes.as_slice()), + (BABE_EPOCH_CHANGES_VERSION, version)], + ) + }) } /// Write the cumulative chain-weight of a block ot aux storage. @@ -91,7 +112,6 @@ pub(crate) fn write_block_weight( ) -> R where F: FnOnce(&[(Vec, &[u8])]) -> R, { - let key = block_weight_key(block_hash); block_weight.using_encoded(|s| write_aux( @@ -107,3 +127,72 @@ pub(crate) fn load_block_weight( ) -> ClientResult> { load_decode(backend, block_weight_key(block_hash).as_slice()) } + +#[cfg(test)] +mod test { + use super::*; + use crate::Epoch; + use fork_tree::ForkTree; + use substrate_test_runtime_client; + use sp_core::H256; + use sp_runtime::traits::NumberFor; + use sc_consensus_epochs::{PersistedEpoch, PersistedEpochHeader, EpochHeader}; + use sp_consensus::Error as ConsensusError; + use sc_network_test::Block as TestBlock; + + #[test] + fn load_decode_from_v0_epoch_changes() { + let epoch = Epoch { + start_slot: 0, + authorities: vec![], + randomness: [0; 32], + epoch_index: 1, + duration: 100, + }; + let client = substrate_test_runtime_client::new(); + let mut v0_tree = ForkTree::, _>::new(); + v0_tree.import::<_, ConsensusError>( + Default::default(), + Default::default(), + PersistedEpoch::Regular(epoch), + &|_, _| Ok(false), // Test is single item only so this can be set to false. + ).unwrap(); + + client.insert_aux( + &[(BABE_EPOCH_CHANGES_KEY, + &EpochChangesForV0::::from_raw(v0_tree).encode()[..])], + &[], + ).unwrap(); + + assert_eq!( + load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), + None, + ); + + let epoch_changes = load_epoch_changes::(&client).unwrap(); + + assert!( + epoch_changes.lock() + .tree() + .iter() + .map(|(_, _, epoch)| epoch.clone()) + .collect::>() == + vec![PersistedEpochHeader::Regular(EpochHeader { + start_slot: 0, + end_slot: 100, + })], + ); // PersistedEpochHeader does not implement Debug, so we use assert! directly. 
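The aux-schema change above adds a separate version key (`BABE_EPOCH_CHANGES_VERSION`) next to the epoch-changes entry, so unversioned (V0) data is migrated transparently on first load and unknown future versions are rejected. Below is a minimal, self-contained sketch of only that dispatch logic; the store, types, and `load` helper are toy stand-ins (no SCALE codec, no `AuxStore`), and only the key name and error message mirror the diff — in the real code the version itself is read with `load_decode` from the aux store rather than passed in.

```rust
// Toy model of the versioned load path for the BABE epoch-changes aux entry.
// `HashMap` stands in for the aux store and `String` for the SCALE-encoded payloads.
use std::collections::HashMap;

const DATA_KEY: &str = "babe_epoch_changes";
const CURRENT_VERSION: u32 = 1;

#[derive(Debug, PartialEq)]
struct EpochChangesV0(String); // stand-in for the legacy (unversioned) encoding

#[derive(Debug, PartialEq)]
struct EpochChanges(String); // stand-in for the current encoding

impl EpochChangesV0 {
    fn migrate(self) -> EpochChanges {
        EpochChanges(self.0)
    }
}

fn load(store: &HashMap<&str, String>, version: Option<u32>) -> Result<Option<EpochChanges>, String> {
    match version {
        // No version key on disk: whatever is stored uses the V0 layout and gets migrated.
        None => Ok(store.get(DATA_KEY).cloned().map(|raw| EpochChangesV0(raw).migrate())),
        // Current version: decode directly.
        Some(CURRENT_VERSION) => Ok(store.get(DATA_KEY).cloned().map(EpochChanges)),
        // Unknown (newer) versions are refused, mirroring the "Unsupported BABE DB version" error.
        Some(other) => Err(format!("Unsupported BABE DB version: {:?}", other)),
    }
}

fn main() {
    let mut store = HashMap::new();
    store.insert(DATA_KEY, "legacy-tree".to_string());
    // First start after the upgrade: no version key, V0 data is migrated transparently.
    assert_eq!(load(&store, None).unwrap(), Some(EpochChanges("legacy-tree".into())));
    // After `write_epoch_changes` has run once, the version key accompanies the data.
    assert_eq!(load(&store, Some(1)).unwrap(), Some(EpochChanges("legacy-tree".into())));
}
```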
+ + write_epoch_changes::( + &epoch_changes.lock(), + |values| { + client.insert_aux(values, &[]).unwrap(); + }, + ); + + assert_eq!( + load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), + Some(1), + ); + } +} diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 967a78e7bfc882debba29d27f1b8187338a0fea1..092bf8153b9bff292bc4aa5cbd86505907219a59 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -62,7 +62,9 @@ pub use sp_consensus_babe::{ BabeApi, ConsensusLog, BABE_ENGINE_ID, SlotNumber, BabeConfiguration, AuthorityId, AuthorityPair, AuthoritySignature, BabeAuthorityWeight, VRF_OUTPUT_LENGTH, - digests::{PreDigest, CompatibleDigestItem, NextEpochDescriptor}, + digests::{ + CompatibleDigestItem, NextEpochDescriptor, PreDigest, PrimaryPreDigest, SecondaryPreDigest, + }, }; pub use sp_consensus::SyncOracle; use std::{ @@ -78,7 +80,7 @@ use sp_runtime::{ generic::{BlockId, OpaqueDigestItemId}, Justification, traits::{Block as BlockT, Header, DigestItemFor, Zero}, }; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, NumberFor}; use sc_keystore::KeyStorePtr; use parking_lot::Mutex; use sp_core::Pair; @@ -99,12 +101,12 @@ use sc_client_api::{ use sp_block_builder::BlockBuilder as BlockBuilderApi; use futures::prelude::*; -use log::{warn, debug, info, trace}; +use log::{debug, info, log, trace, warn}; use sc_consensus_slots::{ SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, }; use sc_consensus_epochs::{ - descendent_query, ViableEpoch, SharedEpochChanges, EpochChangesFor, Epoch as EpochT + descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, }; use sp_blockchain::{ Result as ClientResult, Error as ClientError, @@ -220,20 +222,10 @@ fn babe_err(error: Error) -> Error { error } -macro_rules! babe_info { - ($($i: expr),+) => { - { - info!(target: "babe", $($i),+); - format!($($i),+) - } - }; -} - - /// Intermediate value passed to block importer. -pub struct BabeIntermediate { - /// The epoch data, if available. - pub epoch: ViableEpoch, +pub struct BabeIntermediate { + /// The epoch descriptor. + pub epoch_descriptor: ViableEpochDescriptor, Epoch>, } /// Intermediate key for Babe engine. @@ -366,7 +358,7 @@ pub fn start_babe(BabeParams { &inherent_data_providers, )?; - babe_info!("Starting BABE Authorship worker"); + info!(target: "babe", "👶 Starting BABE Authorship worker"); Ok(sc_consensus_slots::start_slot_worker( config.0, select_chain, @@ -402,7 +394,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork SO: SyncOracle + Send + Clone, Error: std::error::Error + Send + From + From + 'static, { - type EpochData = ViableEpoch; + type EpochData = ViableEpochDescriptor, Epoch>; type Claim = (PreDigest, AuthorityPair); type SyncOracle = SO; type CreateProposer = Pin sc_consensus_slots::SimpleSlotWorker for BabeWork parent: &B::Header, slot_number: u64, ) -> Result { - self.epoch_changes.lock().epoch_for_child_of( + self.epoch_changes.lock().epoch_descriptor_for_child_of( descendent_query(&*self.client), &parent.hash(), parent.number().clone(), slot_number, - |slot| self.config.genesis_epoch(slot) ) .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? 
.ok_or(sp_consensus::Error::InvalidAuthoritiesSet) } - fn authorities_len(&self, epoch_data: &Self::EpochData) -> usize { - epoch_data.as_ref().authorities.len() + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + self.epoch_changes.lock() + .viable_epoch(&epoch_descriptor, |slot| self.config.genesis_epoch(slot)) + .map(|epoch| epoch.as_ref().authorities.len()) } fn claim_slot( &self, _parent_header: &B::Header, slot_number: SlotNumber, - epoch_data: &ViableEpoch, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, ) -> Option { debug!(target: "babe", "Attempting to claim slot {}", slot_number); let s = authorship::claim_slot( slot_number, - epoch_data.as_ref(), + self.epoch_changes.lock().viable_epoch( + &epoch_descriptor, + |slot| self.config.genesis_epoch(slot) + )?.as_ref(), &*self.config, &self.keystore, ); @@ -478,7 +474,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork Self::Claim, Self::EpochData, ) -> sp_consensus::BlockImportParams + Send> { - Box::new(|header, header_hash, body, storage_changes, (_, pair), epoch| { + Box::new(|header, header_hash, body, storage_changes, (_, pair), epoch_descriptor| { // sign the pre-sealed hash of the block and then // add it to a digest item. let signature = pair.sign(header_hash.as_ref()); @@ -490,7 +486,7 @@ impl sc_consensus_slots::SimpleSlotWorker for BabeWork import_block.storage_changes = Some(storage_changes); import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate { epoch }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, ); import_block @@ -576,10 +572,10 @@ fn find_pre_digest(header: &B::Header) -> Result> // genesis block doesn't contain a pre digest so let's generate a // dummy one to not break any invariants in the rest of the code if header.number().is_zero() { - return Ok(PreDigest::Secondary { + return Ok(PreDigest::Secondary(SecondaryPreDigest { slot_number: 0, authority_index: 0, - }); + })); } let mut pre_digest: Option<_> = None; @@ -729,18 +725,19 @@ impl Verifier for BabeVerifier where .map_err(Error::::FetchParentHeader)?; let pre_digest = find_pre_digest::(&header)?; - let epoch = { - let epoch_changes = self.epoch_changes.lock(); - epoch_changes.epoch_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot_number(), - |slot| self.config.genesis_epoch(slot), - ) - .map_err(|e| Error::::ForkTree(Box::new(e)))? - .ok_or_else(|| Error::::FetchEpoch(parent_hash))? - }; + let epoch_changes = self.epoch_changes.lock(); + let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot_number(), + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes.viable_epoch( + &epoch_descriptor, + |slot| self.config.genesis_epoch(slot) + ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; // We add one to the current slot to allow for some small drift. 
// FIXME #1019 in the future, alter this queue to allow deferring of headers @@ -748,7 +745,7 @@ impl Verifier for BabeVerifier where header: header.clone(), pre_digest: Some(pre_digest.clone()), slot_now: slot_now + 1, - epoch: epoch.as_ref(), + epoch: viable_epoch.as_ref(), config: &self.config, }; @@ -808,7 +805,7 @@ impl Verifier for BabeVerifier where import_block.justification = justification; import_block.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate { epoch }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, ); import_block.post_hash = Some(hash); @@ -946,7 +943,7 @@ impl BlockImport for BabeBlockImport BlockImport for BabeBlockImport( + let intermediate = block.take_intermediate::>( INTERMEDIATE_KEY )?; - let epoch = intermediate.epoch; - let first_in_epoch = parent_slot < epoch.as_ref().start_slot; - (epoch, first_in_epoch, parent_weight) + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + (epoch_descriptor, first_in_epoch, parent_weight) }; let total_weight = parent_weight + pre_digest.added_weight(); @@ -994,13 +991,38 @@ impl BlockImport for BabeBlockImport= start slot {}).", - epoch.as_ref().epoch_index, hash, slot_number, epoch.as_ref().start_slot); - babe_info!("Next epoch starts at slot {}", next_epoch.as_ref().start_slot); + let viable_epoch = epoch_changes.viable_epoch( + &epoch_descriptor, + |slot| self.config.genesis_epoch(slot), + ).ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; + + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; + + log!(target: "babe", + log_level, + "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot_number, + viable_epoch.as_ref().start_slot, + ); + + let next_epoch = viable_epoch.increment(next_epoch_descriptor); + + log!(target: "babe", + log_level, + "👶 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); // prune the tree of epochs not part of the finalized chain or // that are not live anymore, and then track the given epoch change @@ -1227,7 +1249,8 @@ pub mod test_helpers { HeaderMetadata, C::Api: BabeApi, { - let epoch = link.epoch_changes.lock().epoch_for_child_of( + let epoch_changes = link.epoch_changes.lock(); + let epoch = epoch_changes.epoch_data_for_child_of( descendent_query(client), &parent.hash(), parent.number().clone(), @@ -1237,7 +1260,7 @@ pub mod test_helpers { authorship::claim_slot( slot_number, - epoch.as_ref(), + &epoch, &link.config, keystore, ).map(|(digest, _)| digest) diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index a5493918f001de75f828eaa573cbafb9c2fcb8f6..20b924669d6147816c0fbf9c0c8e85937b17c1af 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -122,7 +122,7 @@ impl DummyProposer { // figure out if we should add a consensus digest, since the test runtime // doesn't. 
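With this refactor the verifier and the slot worker no longer hand a full `ViableEpoch` to block import; `BabeIntermediate<B>` now carries only the `epoch_descriptor`, and `BabeBlockImport` takes it back out with `take_intermediate` and resolves it against the shared `EpochChanges` when it actually needs the epoch. The sketch below models just that hand-off with `Box<dyn Any>` and toy types; the map, key, and descriptor are illustrative stand-ins, not the real `BlockImportParams` intermediates API.

```rust
// Toy model of the verifier -> block-import hand-off: only a lightweight descriptor travels
// in the intermediates map, and the importer downcasts it back when the epoch is needed.
use std::any::Any;
use std::collections::HashMap;

#[derive(Debug, PartialEq)]
struct EpochDescriptor {
    start_slot: u64,
}

struct BabeIntermediate {
    epoch_descriptor: EpochDescriptor,
}

const INTERMEDIATE_KEY: &str = "babe1";

fn main() {
    // Verifier side: record which epoch the block belongs to without cloning the epoch data.
    let mut intermediates: HashMap<&str, Box<dyn Any>> = HashMap::new();
    intermediates.insert(
        INTERMEDIATE_KEY,
        Box::new(BabeIntermediate { epoch_descriptor: EpochDescriptor { start_slot: 100 } }),
    );

    // Import side: take the intermediate back out and resolve the descriptor only when needed
    // (this is where the real code consults `epoch_changes.viable_epoch(..)`).
    let intermediate = intermediates
        .remove(INTERMEDIATE_KEY)
        .and_then(|boxed| boxed.downcast::<BabeIntermediate>().ok())
        .expect("the verifier always inserts the BABE intermediate");

    assert_eq!(intermediate.epoch_descriptor, EpochDescriptor { start_slot: 100 });
}
```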
let epoch_changes = self.factory.epoch_changes.lock(); - let epoch = epoch_changes.epoch_for_child_of( + let epoch = epoch_changes.epoch_data_for_child_of( descendent_query(&*self.factory.client), &self.parent_hash, self.parent_number, @@ -130,8 +130,7 @@ impl DummyProposer { |slot| self.factory.config.genesis_epoch(slot), ) .expect("client has data to find epoch") - .expect("can compute epoch for baked block") - .into_inner(); + .expect("can compute epoch for baked block"); let first_in_epoch = self.parent_slot < epoch.start_slot; if first_in_epoch { @@ -421,7 +420,7 @@ fn run_one_test( panic!("Verification failed for {:?}: {}", h, e); } } - + Poll::<()>::Pending }), future::select(future::join_all(import_notifications), future::join_all(babe_futures)) @@ -554,10 +553,10 @@ fn propose_and_import_block( let pre_digest = sp_runtime::generic::Digest { logs: vec![ Item::babe_pre_digest( - PreDigest::Secondary { + PreDigest::Secondary(SecondaryPreDigest { authority_index: 0, slot_number, - }, + }), ), ], }; @@ -566,12 +565,11 @@ fn propose_and_import_block( let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; - let epoch = proposer_factory.epoch_changes.lock().epoch_for_child_of( + let epoch_descriptor = proposer_factory.epoch_changes.lock().epoch_descriptor_for_child_of( descendent_query(&*proposer_factory.client), &parent_hash, *parent.number(), slot_number, - |slot| proposer_factory.config.genesis_epoch(slot) ).unwrap().unwrap(); let seal = { @@ -595,7 +593,7 @@ fn propose_and_import_block( import.body = Some(block.extrinsics); import.intermediates.insert( Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate { epoch }) as Box, + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, ); import.fork_choice = Some(ForkChoiceStrategy::LongestChain); let import_result = block_import.import_block(import, Default::default()).unwrap(); @@ -637,13 +635,13 @@ fn importing_block_one_sets_genesis_epoch() { let genesis_epoch = data.link.config.genesis_epoch(999); let epoch_changes = data.link.epoch_changes.lock(); - let epoch_for_second_block = epoch_changes.epoch_for_child_of( + let epoch_for_second_block = epoch_changes.epoch_data_for_child_of( descendent_query(&*client), &block_hash, 1, 1000, |slot| data.link.config.genesis_epoch(slot), - ).unwrap().unwrap().into_inner(); + ).unwrap().unwrap(); assert_eq!(epoch_for_second_block, genesis_epoch); } diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 70418b8aea1e38c4ac890ded5de1f64674b389df..2fd37280b3b369bdb1e73895dc9ae683030eaa23 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -15,11 +15,12 @@ // along with Substrate. If not, see . //! Verification for BABE headers. 
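Throughout this patch the tuple-like `PreDigest::Primary { .. }` / `Secondary { .. }` variants are replaced by dedicated `PrimaryPreDigest` / `SecondaryPreDigest` structs, which is why `check_primary_header` and `check_secondary_header` now take a single `&PrimaryPreDigest` / `&SecondaryPreDigest` instead of destructured tuples. A simplified, self-contained sketch of that shape follows; field names track the diff, the VRF payload of the primary variant is omitted, and the accessor methods are illustrative only.

```rust
// Simplified shape of the new pre-digest types: struct variants instead of tuple-style
// enum variants, so verification code can pass the whole pre-digest by reference.
#[derive(Debug)]
struct PrimaryPreDigest {
    authority_index: u32,
    slot_number: u64,
    // vrf_output / vrf_proof omitted in this sketch
}

#[derive(Debug)]
struct SecondaryPreDigest {
    authority_index: u32,
    slot_number: u64,
}

#[derive(Debug)]
enum PreDigest {
    Primary(PrimaryPreDigest),
    Secondary(SecondaryPreDigest),
}

impl PreDigest {
    fn slot_number(&self) -> u64 {
        match self {
            PreDigest::Primary(p) => p.slot_number,
            PreDigest::Secondary(s) => s.slot_number,
        }
    }

    fn authority_index(&self) -> u32 {
        match self {
            PreDigest::Primary(p) => p.authority_index,
            PreDigest::Secondary(s) => s.authority_index,
        }
    }
}

fn main() {
    // The genesis dummy digest from `find_pre_digest`, expressed with the new struct variant.
    let genesis = PreDigest::Secondary(SecondaryPreDigest { slot_number: 0, authority_index: 0 });
    assert_eq!(genesis.slot_number(), 0);
    assert_eq!(genesis.authority_index(), 0);
}
```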
-use schnorrkel::vrf::{VRFOutput, VRFProof}; use sp_runtime::{traits::Header, traits::DigestItemFor}; use sp_core::{Pair, Public}; -use sp_consensus_babe::{AuthoritySignature, SlotNumber, AuthorityIndex, AuthorityPair, AuthorityId}; -use sp_consensus_babe::digests::{PreDigest, CompatibleDigestItem}; +use sp_consensus_babe::{AuthoritySignature, SlotNumber, AuthorityPair, AuthorityId}; +use sp_consensus_babe::digests::{ + PreDigest, PrimaryPreDigest, SecondaryPreDigest, CompatibleDigestItem +}; use sc_consensus_slots::CheckedHeader; use log::{debug, trace}; use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; @@ -93,27 +94,23 @@ pub(super) fn check_header( }; match &pre_digest { - PreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { + PreDigest::Primary(primary) => { debug!(target: "babe", "Verifying Primary block"); - let digest = (vrf_output, vrf_proof, *authority_index, *slot_number); - check_primary_header::( pre_hash, - digest, + primary, sig, &epoch, config.c, )?; }, - PreDigest::Secondary { authority_index, slot_number } if config.secondary_slots => { + PreDigest::Secondary(secondary) if config.secondary_slots => { debug!(target: "babe", "Verifying Secondary block"); - let digest = (*authority_index, *slot_number); - check_secondary_header::( pre_hash, - digest, + secondary, sig, &epoch, )?; @@ -143,25 +140,23 @@ pub(super) struct VerifiedHeaderInfo { /// its parent since it is a primary block. fn check_primary_header( pre_hash: B::Hash, - pre_digest: (&VRFOutput, &VRFProof, AuthorityIndex, SlotNumber), + pre_digest: &PrimaryPreDigest, signature: AuthoritySignature, epoch: &Epoch, c: (u64, u64), ) -> Result<(), Error> { - let (vrf_output, vrf_proof, authority_index, slot_number) = pre_digest; - - let author = &epoch.authorities[authority_index as usize].0; + let author = &epoch.authorities[pre_digest.authority_index as usize].0; if AuthorityPair::verify(&signature, pre_hash, &author) { let (inout, _) = { let transcript = make_transcript( &epoch.randomness, - slot_number, + pre_digest.slot_number, epoch.epoch_index, ); schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { - p.vrf_verify(transcript, vrf_output, vrf_proof) + p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) }).map_err(|s| { babe_err(Error::VRFVerificationFailed(s)) })? @@ -170,7 +165,7 @@ fn check_primary_header( let threshold = calculate_primary_threshold( c, &epoch.authorities, - authority_index as usize, + pre_digest.authority_index as usize, ); if !check_primary_threshold(&inout, threshold) { @@ -189,21 +184,19 @@ fn check_primary_header( /// compared to its parent since it is a secondary block. fn check_secondary_header( pre_hash: B::Hash, - pre_digest: (AuthorityIndex, SlotNumber), + pre_digest: &SecondaryPreDigest, signature: AuthoritySignature, epoch: &Epoch, ) -> Result<(), Error> { - let (authority_index, slot_number) = pre_digest; - // check the signature is valid under the expected authority and // chain state. 
let expected_author = secondary_slot_author( - slot_number, + pre_digest.slot_number, &epoch.authorities, epoch.randomness, ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; - let author = &epoch.authorities[authority_index as usize].0; + let author = &epoch.authorities[pre_digest.authority_index as usize].0; if expected_author != author { return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index fe2365429892121a4092a1f63021725af6cf5028..3496141ec712bdc4c8daa043e10faaecb13d8b7e 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-epochs" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Generic epochs-based utilities for consensus" edition = "2018" @@ -9,9 +9,12 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } parking_lot = "0.10.0" -fork-tree = { version = "2.0.0-alpha.2", path = "../../../utils/fork-tree" } -sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-alpha.2"} -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sc-client-api = { path = "../../api" , version = "2.0.0-alpha.2"} +fork-tree = { version = "2.0.0-alpha.5", path = "../../../utils/fork-tree" } +sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-alpha.5"} +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sc-client-api = { path = "../../api" , version = "2.0.0-alpha.5"} + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 05ee611de137853e32ef4bfc4e22e53f6c17f914..001c172b3490573dea6a0dcfbefc428cf79c13aa 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -16,7 +16,9 @@ //! Generic utilities for epoch-based consensus engines. -use std::{sync::Arc, ops::Add}; +pub mod migration; + +use std::{sync::Arc, ops::Add, collections::BTreeMap, borrow::{Borrow, BorrowMut}}; use parking_lot::Mutex; use codec::{Encode, Decode}; use fork_tree::ForkTree; @@ -67,60 +69,126 @@ impl<'a, H, Block> IsDescendentOfBuilder } /// Epoch data, distinguish whether it is genesis or not. +/// +/// Once an epoch is created, it must have a known `start_slot` and `end_slot`, which cannot be +/// changed. Consensus engine may modify any other data in the epoch, if needed. pub trait Epoch { /// Descriptor for the next epoch. type NextEpochDescriptor; /// Type of the slot number. - type SlotNumber: Ord; + type SlotNumber: Ord + Copy; + /// The starting slot of the epoch. + fn start_slot(&self) -> Self::SlotNumber; + /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, + /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. + fn end_slot(&self) -> Self::SlotNumber; /// Increment the epoch data, using the next epoch descriptor. fn increment(&self, descriptor: Self::NextEpochDescriptor) -> Self; +} - /// Produce the "end slot" of the epoch. 
This is NOT inclusive to the epoch, +impl<'a, E: Epoch> From<&'a E> for EpochHeader { + fn from(epoch: &'a E) -> EpochHeader { + Self { + start_slot: epoch.start_slot(), + end_slot: epoch.end_slot(), + } + } +} + +/// Header of epoch data, consisting of start and end slot. +#[derive(Eq, PartialEq, Encode, Decode, Debug)] +pub struct EpochHeader { + /// The starting slot of the epoch. + pub start_slot: E::SlotNumber, + /// The end slot of the epoch. This is NOT inclusive to the epoch, /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. - fn end_slot(&self) -> Self::SlotNumber; - /// Produce the "start slot" of the epoch. - fn start_slot(&self) -> Self::SlotNumber; + pub end_slot: E::SlotNumber, } -/// An unimported genesis epoch. -pub struct UnimportedGenesisEpoch(Epoch); +impl Clone for EpochHeader { + fn clone(&self) -> Self { + Self { + start_slot: self.start_slot, + end_slot: self.end_slot, + } + } +} + +/// Position of the epoch identifier. +#[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Debug)] +pub enum EpochIdentifierPosition { + /// The identifier points to a genesis epoch `epoch_0`. + Genesis0, + /// The identifier points to a genesis epoch `epoch_1`. + Genesis1, + /// The identifier points to a regular epoch. + Regular, +} + +/// Epoch identifier. +#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)] +pub struct EpochIdentifier { + /// Location of the epoch. + pub position: EpochIdentifierPosition, + /// Hash of the block when the epoch is signaled. + pub hash: Hash, + /// Number of the block when the epoch is signaled. + pub number: Number, +} /// The viable epoch under which a block can be verified. /// /// If this is the first non-genesis block in the chain, then it will /// hold an `UnimportedGenesis` epoch. -pub enum ViableEpoch { - /// Genesis viable epoch data. - Genesis(UnimportedGenesisEpoch), +pub enum ViableEpoch { + /// Unimported genesis viable epoch data. + UnimportedGenesis(E), /// Regular viable epoch data. - Regular(Epoch), + Signaled(ERef), } -impl From for ViableEpoch { - fn from(epoch: Epoch) -> ViableEpoch { - ViableEpoch::Regular(epoch) +impl AsRef for ViableEpoch where + ERef: Borrow, +{ + fn as_ref(&self) -> &E { + match *self { + ViableEpoch::UnimportedGenesis(ref e) => e, + ViableEpoch::Signaled(ref e) => e.borrow(), + } } } -impl AsRef for ViableEpoch { - fn as_ref(&self) -> &Epoch { +impl AsMut for ViableEpoch where + ERef: BorrowMut, +{ + fn as_mut(&mut self) -> &mut E { match *self { - ViableEpoch::Genesis(UnimportedGenesisEpoch(ref e)) => e, - ViableEpoch::Regular(ref e) => e, + ViableEpoch::UnimportedGenesis(ref mut e) => e, + ViableEpoch::Signaled(ref mut e) => e.borrow_mut(), } } } -impl ViableEpoch where - Epoch: crate::Epoch + Clone, +impl ViableEpoch where + E: Epoch + Clone, + ERef: Borrow, { /// Extract the underlying epoch, disregarding the fact that a genesis /// epoch may be unimported. - pub fn into_inner(self) -> Epoch { + pub fn into_cloned_inner(self) -> E { + match self { + ViableEpoch::UnimportedGenesis(e) => e, + ViableEpoch::Signaled(e) => e.borrow().clone(), + } + } + + /// Get cloned value for the viable epoch. + pub fn into_cloned(self) -> ViableEpoch { match self { - ViableEpoch::Genesis(UnimportedGenesisEpoch(e)) => e, - ViableEpoch::Regular(e) => e, + ViableEpoch::UnimportedGenesis(e) => + ViableEpoch::UnimportedGenesis(e), + ViableEpoch::Signaled(e) => ViableEpoch::Signaled(e.borrow().clone()), } } @@ -128,36 +196,84 @@ impl ViableEpoch where /// into the fork-tree. 
pub fn increment( &self, - next_descriptor: Epoch::NextEpochDescriptor - ) -> IncrementedEpoch { + next_descriptor: E::NextEpochDescriptor + ) -> IncrementedEpoch { let next = self.as_ref().increment(next_descriptor); let to_persist = match *self { - ViableEpoch::Genesis(UnimportedGenesisEpoch(ref epoch_0)) => + ViableEpoch::UnimportedGenesis(ref epoch_0) => PersistedEpoch::Genesis(epoch_0.clone(), next), - ViableEpoch::Regular(_) => PersistedEpoch::Regular(next), + ViableEpoch::Signaled(_) => PersistedEpoch::Regular(next), }; IncrementedEpoch(to_persist) } } -/// The data type encoded on disk. -#[derive(Clone, Encode, Decode)] -pub enum PersistedEpoch { +/// Descriptor for a viable epoch. +#[derive(PartialEq, Eq, Clone, Debug)] +pub enum ViableEpochDescriptor { + /// The epoch is an unimported genesis, with given start slot number. + UnimportedGenesis(E::SlotNumber), + /// The epoch is signaled and has been imported, with given identifier and header. + Signaled(EpochIdentifier, EpochHeader) +} + +impl ViableEpochDescriptor { + /// Start slot of the descriptor. + pub fn start_slot(&self) -> E::SlotNumber { + match self { + Self::UnimportedGenesis(start_slot) => *start_slot, + Self::Signaled(_, header) => header.start_slot, + } + } +} + +/// Persisted epoch stored in EpochChanges. +#[derive(Clone, Encode, Decode, Debug)] +pub enum PersistedEpoch { /// Genesis persisted epoch data. epoch_0, epoch_1. - Genesis(Epoch, Epoch), + Genesis(E, E), /// Regular persisted epoch data. epoch_n. - Regular(Epoch), + Regular(E), +} + +impl<'a, E: Epoch> From<&'a PersistedEpoch> for PersistedEpochHeader { + fn from(epoch: &'a PersistedEpoch) -> Self { + match epoch { + PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => + PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()), + PersistedEpoch::Regular(ref epoch_n) => + PersistedEpochHeader::Regular(epoch_n.into()), + } + } +} + +/// Persisted epoch header stored in ForkTree. +#[derive(Encode, Decode, PartialEq, Eq)] +pub enum PersistedEpochHeader { + /// Genesis persisted epoch header. epoch_0, epoch_1. + Genesis(EpochHeader, EpochHeader), + /// Regular persisted epoch header. epoch_n. + Regular(EpochHeader), +} + +impl Clone for PersistedEpochHeader { + fn clone(&self) -> Self { + match self { + Self::Genesis(epoch_0, epoch_1) => Self::Genesis(epoch_0.clone(), epoch_1.clone()), + Self::Regular(epoch_n) => Self::Regular(epoch_n.clone()), + } + } } /// A fresh, incremented epoch to import into the underlying fork-tree. /// /// Create this with `ViableEpoch::increment`. #[must_use = "Freshly-incremented epoch must be imported with `EpochChanges::import`"] -pub struct IncrementedEpoch(PersistedEpoch); +pub struct IncrementedEpoch(PersistedEpoch); -impl AsRef for IncrementedEpoch { - fn as_ref(&self) -> &Epoch { +impl AsRef for IncrementedEpoch { + fn as_ref(&self) -> &E { match self.0 { PersistedEpoch::Genesis(_, ref epoch_1) => epoch_1, PersistedEpoch::Regular(ref epoch_n) => epoch_n, @@ -181,8 +297,9 @@ impl AsRef for IncrementedEpoch { /// /// Further epochs (epoch_2, ..., epoch_n) each get their own entry. #[derive(Clone, Encode, Decode)] -pub struct EpochChanges { - inner: ForkTree>, +pub struct EpochChanges { + inner: ForkTree>, + epochs: BTreeMap<(Hash, Number), PersistedEpoch>, } // create a fake header hash which hasn't been included in the chain. 
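The core of the `sc-consensus-epochs` rework is visible in the new `EpochChanges` layout above: the fork tree now stores only lightweight `PersistedEpochHeader`s (start/end slots), the full `PersistedEpoch` data moves into a `BTreeMap` keyed by the block at which the epoch was signalled, and callers receive a descriptor that is resolved to the full data on demand. The following self-contained sketch models that split with toy types; a plain `Vec` stands in for the real `ForkTree`, and every name here is illustrative rather than the crate's own.

```rust
// Toy model of the header/data split: cheap lookups over headers, full epoch data resolved
// separately through a descriptor.
use std::collections::BTreeMap;

#[derive(Clone, Debug, PartialEq)]
struct EpochHeader {
    start_slot: u64,
    end_slot: u64,
}

#[derive(Debug, PartialEq)]
struct EpochData {
    header: EpochHeader,
    authorities: Vec<String>,
}

#[derive(Debug, PartialEq)]
struct Descriptor {
    id: ([u8; 1], u64),
    header: EpochHeader,
}

#[derive(Default)]
struct EpochChanges {
    headers: Vec<(([u8; 1], u64), EpochHeader)>, // ForkTree<Hash, Number, PersistedEpochHeader> in the real crate
    epochs: BTreeMap<([u8; 1], u64), EpochData>, // full PersistedEpoch data, filled in by `import`
}

impl EpochChanges {
    fn import(&mut self, hash: [u8; 1], number: u64, data: EpochData) {
        self.headers.push(((hash, number), data.header.clone()));
        self.epochs.insert((hash, number), data);
    }

    // Cheap lookup over headers only, mirroring `epoch_descriptor_for_child_of`.
    fn descriptor_for_slot(&self, slot: u64) -> Option<Descriptor> {
        self.headers
            .iter()
            .find(|(_, header)| header.start_slot <= slot && slot < header.end_slot)
            .map(|(id, header)| Descriptor { id: *id, header: header.clone() })
    }

    // Resolution step, mirroring `viable_epoch` / `epoch_data`.
    fn epoch_data(&self, descriptor: &Descriptor) -> Option<&EpochData> {
        self.epochs.get(&descriptor.id)
    }
}

fn main() {
    let mut changes = EpochChanges::default();
    changes.import(*b"A", 1, EpochData {
        header: EpochHeader { start_slot: 100, end_slot: 200 },
        authorities: vec!["alice".into()],
    });

    let descriptor = changes.descriptor_for_slot(150).expect("an epoch covers slot 150");
    assert_eq!(descriptor.header.end_slot, 200);

    let data = changes.epoch_data(&descriptor).expect("full data is kept alongside the headers");
    assert_eq!(data.authorities, vec!["alice".to_string()]);
}
```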
@@ -194,19 +311,18 @@ fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { h } -impl Default for EpochChanges where - Hash: PartialEq, +impl Default for EpochChanges where + Hash: PartialEq + Ord, Number: Ord, { fn default() -> Self { - EpochChanges { inner: ForkTree::new() } + EpochChanges { inner: ForkTree::new(), epochs: BTreeMap::new() } } } -impl EpochChanges where - Hash: PartialEq + AsRef<[u8]> + AsMut<[u8]> + Copy, +impl EpochChanges where + Hash: PartialEq + Ord + AsRef<[u8]> + AsMut<[u8]> + Copy, Number: Ord + One + Zero + Add + Copy, - Epoch: crate::Epoch + Clone, { /// Create a new epoch change. pub fn new() -> Self { @@ -227,45 +343,162 @@ impl EpochChanges where descendent_of_builder: D, hash: &Hash, number: Number, - slot: Epoch::SlotNumber, + slot: E::SlotNumber, ) -> Result<(), fork_tree::Error> { let is_descendent_of = descendent_of_builder .build_is_descendent_of(None); - let predicate = |epoch: &PersistedEpoch| match *epoch { - PersistedEpoch::Genesis(_, ref epoch_1) => - slot >= epoch_1.end_slot(), - PersistedEpoch::Regular(ref epoch_n) => - slot >= epoch_n.end_slot(), + let predicate = |epoch: &PersistedEpochHeader| match *epoch { + PersistedEpochHeader::Genesis(_, ref epoch_1) => + slot >= epoch_1.end_slot, + PersistedEpochHeader::Regular(ref epoch_n) => + slot >= epoch_n.end_slot, }; // prune any epochs which could not be _live_ as of the children of the // finalized block, i.e. re-root the fork tree to the oldest ancestor of // (hash, number) where epoch.end_slot() >= finalized_slot - self.inner.prune( + let removed = self.inner.prune( hash, &number, &is_descendent_of, &predicate, )?; + for (hash, number, _) in removed { + self.epochs.remove(&(hash, number)); + } + Ok(()) } + /// Get a reference to an epoch with given identifier. + pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { + self.epochs.get(&(id.hash, id.number)) + .and_then(|v| { + match v { + PersistedEpoch::Genesis(ref epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), + PersistedEpoch::Genesis(_, ref epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), + PersistedEpoch::Regular(ref epoch_n) + if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), + _ => None, + } + }) + } + + /// Get a reference to a viable epoch with given descriptor. + pub fn viable_epoch( + &self, + descriptor: &ViableEpochDescriptor, + make_genesis: G, + ) -> Option> where + G: FnOnce(E::SlotNumber) -> E + { + match descriptor { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) + }, + ViableEpochDescriptor::Signaled(identifier, _) => { + self.epoch(&identifier).map(ViableEpoch::Signaled) + }, + } + } + + /// Get a mutable reference to an epoch with given identifier. + pub fn epoch_mut(&mut self, id: &EpochIdentifier) -> Option<&mut E> { + self.epochs.get_mut(&(id.hash, id.number)) + .and_then(|v| { + match v { + PersistedEpoch::Genesis(ref mut epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), + PersistedEpoch::Genesis(_, ref mut epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), + PersistedEpoch::Regular(ref mut epoch_n) + if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), + _ => None, + } + }) + } + + /// Get a mutable reference to a viable epoch with given descriptor. 
+ pub fn viable_epoch_mut( + &mut self, + descriptor: &ViableEpochDescriptor, + make_genesis: G, + ) -> Option> where + G: FnOnce(E::SlotNumber) -> E + { + match descriptor { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) + }, + ViableEpochDescriptor::Signaled(identifier, _) => { + self.epoch_mut(&identifier).map(ViableEpoch::Signaled) + }, + } + } + + /// Get the epoch data from an epoch descriptor. + /// + /// Note that this function ignores the fact that an genesis epoch might need to be imported. + /// Mostly useful for testing. + pub fn epoch_data( + &self, + descriptor: &ViableEpochDescriptor, + make_genesis: G + ) -> Option where + G: FnOnce(E::SlotNumber) -> E, + E: Clone, + { + match descriptor { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + Some(make_genesis(*slot_number)) + }, + ViableEpochDescriptor::Signaled(identifier, _) => { + self.epoch(&identifier).cloned() + }, + } + } + + /// Finds the epoch data for a child of the given block. Similar to + /// `epoch_descriptor_for_child_of` but returns the full data. + /// + /// Note that this function ignores the fact that an genesis epoch might need to be imported. + /// Mostly useful for testing. + pub fn epoch_data_for_child_of, G>( + &self, + descendent_of_builder: D, + parent_hash: &Hash, + parent_number: Number, + slot_number: E::SlotNumber, + make_genesis: G, + ) -> Result, fork_tree::Error> where + G: FnOnce(E::SlotNumber) -> E, + E: Clone, + { + let descriptor = self.epoch_descriptor_for_child_of( + descendent_of_builder, + parent_hash, + parent_number, + slot_number + )?; + + Ok(descriptor.and_then(|des| self.epoch_data(&des, make_genesis))) + } + /// Finds the epoch for a child of the given block, assuming the given slot number. /// /// If the returned epoch is an `UnimportedGenesis` epoch, it should be imported into the /// tree. - pub fn epoch_for_child_of, G>( + pub fn epoch_descriptor_for_child_of>( &self, descendent_of_builder: D, parent_hash: &Hash, parent_number: Number, - slot_number: Epoch::SlotNumber, - make_genesis: G, - ) -> Result>, fork_tree::Error> - where G: FnOnce(Epoch::SlotNumber) -> Epoch - { + slot_number: E::SlotNumber, + ) -> Result>, fork_tree::Error> { // find_node_where will give you the node in the fork-tree which is an ancestor // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, // then it won't be returned. we need to create a new fake chain head hash which @@ -277,8 +510,7 @@ impl EpochChanges where if parent_number == Zero::zero() { // need to insert the genesis epoch. - let genesis_epoch = make_genesis(slot_number); - return Ok(Some(ViableEpoch::Genesis(UnimportedGenesisEpoch(genesis_epoch)))); + return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot_number))) } // We want to find the deepest node in the tree which is an ancestor @@ -286,11 +518,11 @@ impl EpochChanges where // slot of our block. The genesis special-case doesn't need to look // at epoch_1 -- all we're doing here is figuring out which node // we need. 
- let predicate = |epoch: &PersistedEpoch| match *epoch { - PersistedEpoch::Genesis(ref epoch_0, _) => - epoch_0.start_slot() <= slot_number, - PersistedEpoch::Regular(ref epoch_n) => - epoch_n.start_slot() <= slot_number, + let predicate = |epoch: &PersistedEpochHeader| match *epoch { + PersistedEpochHeader::Genesis(ref epoch_0, _) => + epoch_0.start_slot <= slot_number, + PersistedEpochHeader::Regular(ref epoch_n) => + epoch_n.start_slot <= slot_number, }; self.inner.find_node_where( @@ -299,18 +531,27 @@ impl EpochChanges where &is_descendent_of, &predicate, ) - .map(|n| n.map(|node| ViableEpoch::Regular(match node.data { - // Ok, we found our node. - // and here we figure out which of the internal epochs - // of a genesis node to use based on their start slot. - PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => - if epoch_1.start_slot() <= slot_number { - epoch_1.clone() - } else { - epoch_0.clone() - }, - PersistedEpoch::Regular(ref epoch_n) => epoch_n.clone(), - }))) + .map(|n| { + n.map(|node| (match node.data { + // Ok, we found our node. + // and here we figure out which of the internal epochs + // of a genesis node to use based on their start slot. + PersistedEpochHeader::Genesis(ref epoch_0, ref epoch_1) => + if epoch_1.start_slot <= slot_number { + (EpochIdentifierPosition::Genesis1, epoch_1.clone()) + } else { + (EpochIdentifierPosition::Genesis0, epoch_0.clone()) + }, + PersistedEpochHeader::Regular(ref epoch_n) => + (EpochIdentifierPosition::Regular, epoch_n.clone()), + }, node)).map(|((position, header), node)| { + ViableEpochDescriptor::Signaled(EpochIdentifier { + position, + hash: node.hash, + number: node.number + }, header) + }) + }) } /// Import a new epoch-change, signalled at the given block. @@ -324,26 +565,30 @@ impl EpochChanges where hash: Hash, number: Number, parent_hash: Hash, - epoch: IncrementedEpoch, + epoch: IncrementedEpoch, ) -> Result<(), fork_tree::Error> { let is_descendent_of = descendent_of_builder .build_is_descendent_of(Some((hash, parent_hash))); + let header = PersistedEpochHeader::::from(&epoch.0); let res = self.inner.import( hash, number, - epoch.0, + header, &is_descendent_of, ); match res { - Ok(_) | Err(fork_tree::Error::Duplicate) => Ok(()), + Ok(_) | Err(fork_tree::Error::Duplicate) => { + self.epochs.insert((hash, number), epoch.0); + Ok(()) + }, Err(e) => Err(e), } } /// Return the inner fork tree. 
- pub fn tree(&self) -> &ForkTree> { + pub fn tree(&self) -> &ForkTree> { &self.inner } } @@ -443,39 +688,34 @@ mod tests { } }; - let make_genesis = |slot| Epoch { - start_slot: slot, - duration: 100, - }; - - let epoch_changes = EpochChanges::new(); - let genesis_epoch = epoch_changes.epoch_for_child_of( + let epoch_changes = EpochChanges::<_, _, Epoch>::new(); + let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 10101, - &make_genesis, ).unwrap().unwrap(); match genesis_epoch { - ViableEpoch::Genesis(_) => {}, + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + assert_eq!(slot_number, 10101u64); + }, _ => panic!("should be unimported genesis"), }; - assert_eq!(genesis_epoch.as_ref(), &make_genesis(10101)); - let genesis_epoch_2 = epoch_changes.epoch_for_child_of( + let genesis_epoch_2 = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 10102, - &make_genesis, ).unwrap().unwrap(); match genesis_epoch_2 { - ViableEpoch::Genesis(_) => {}, + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + assert_eq!(slot_number, 10102u64); + }, _ => panic!("should be unimported genesis"), }; - assert_eq!(genesis_epoch_2.as_ref(), &make_genesis(10102)); } #[test] @@ -499,18 +739,20 @@ mod tests { duration: 100, }; - let mut epoch_changes = EpochChanges::new(); - let genesis_epoch = epoch_changes.epoch_for_child_of( + let mut epoch_changes = EpochChanges::<_, _, Epoch>::new(); + let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 100, - &make_genesis, ).unwrap().unwrap(); - assert_eq!(genesis_epoch.as_ref(), &make_genesis(100)); + assert_eq!(genesis_epoch, ViableEpochDescriptor::UnimportedGenesis(100)); - let import_epoch_1 = genesis_epoch.increment(()); + let import_epoch_1 = epoch_changes + .viable_epoch(&genesis_epoch, &make_genesis) + .unwrap() + .increment(()); let epoch_1 = import_epoch_1.as_ref().clone(); epoch_changes.import( @@ -520,7 +762,7 @@ mod tests { *b"0", import_epoch_1, ).unwrap(); - let genesis_epoch = genesis_epoch.into_inner(); + let genesis_epoch = epoch_changes.epoch_data(&genesis_epoch, &make_genesis).unwrap(); assert!(is_descendent_of(b"0", b"A").unwrap()); @@ -529,13 +771,13 @@ mod tests { { // x is still within the genesis epoch. - let x = epoch_changes.epoch_for_child_of( + let x = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"A", 1, end_slot - 1, &make_genesis, - ).unwrap().unwrap().into_inner(); + ).unwrap().unwrap(); assert_eq!(x, genesis_epoch); } @@ -543,13 +785,13 @@ mod tests { { // x is now at the next epoch, because the block is now at the // start slot of epoch 1. - let x = epoch_changes.epoch_for_child_of( + let x = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"A", 1, end_slot, &make_genesis, - ).unwrap().unwrap().into_inner(); + ).unwrap().unwrap(); assert_eq!(x, epoch_1); } @@ -557,13 +799,13 @@ mod tests { { // x is now at the next epoch, because the block is now after // start slot of epoch 1. 
- let x = epoch_changes.epoch_for_child_of( + let x = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"A", 1, epoch_1.end_slot() - 1, &make_genesis, - ).unwrap().unwrap().into_inner(); + ).unwrap().unwrap(); assert_eq!(x, epoch_1); } @@ -596,47 +838,54 @@ mod tests { // insert genesis epoch for A { - let genesis_epoch_a = epoch_changes.epoch_for_child_of( + let genesis_epoch_a_descriptor = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 100, - &make_genesis, ).unwrap().unwrap(); + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + epoch_changes.import( &is_descendent_of, *b"A", 1, *b"0", - genesis_epoch_a.increment(next_descriptor.clone()), + incremented_epoch, ).unwrap(); - } // insert genesis epoch for X { - let genesis_epoch_x = epoch_changes.epoch_for_child_of( + let genesis_epoch_x_descriptor = epoch_changes.epoch_descriptor_for_child_of( &is_descendent_of, b"0", 0, 1000, - &make_genesis, ).unwrap().unwrap(); + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + epoch_changes.import( &is_descendent_of, *b"X", 1, *b"0", - genesis_epoch_x.increment(next_descriptor.clone()), + incremented_epoch, ).unwrap(); } // now check that the genesis epochs for our respective block 1s // respect the chain structure. { - let epoch_for_a_child = epoch_changes.epoch_for_child_of( + let epoch_for_a_child = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"A", 1, @@ -644,9 +893,9 @@ mod tests { &make_genesis, ).unwrap().unwrap(); - assert_eq!(epoch_for_a_child.into_inner(), make_genesis(100)); + assert_eq!(epoch_for_a_child, make_genesis(100)); - let epoch_for_x_child = epoch_changes.epoch_for_child_of( + let epoch_for_x_child = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"X", 1, @@ -654,9 +903,9 @@ mod tests { &make_genesis, ).unwrap().unwrap(); - assert_eq!(epoch_for_x_child.into_inner(), make_genesis(1000)); + assert_eq!(epoch_for_x_child, make_genesis(1000)); - let epoch_for_x_child_before_genesis = epoch_changes.epoch_for_child_of( + let epoch_for_x_child_before_genesis = epoch_changes.epoch_data_for_child_of( &is_descendent_of, b"X", 1, diff --git a/client/consensus/epochs/src/migration.rs b/client/consensus/epochs/src/migration.rs new file mode 100644 index 0000000000000000000000000000000000000000..e4717b5584e0ec67801ce3d264e90ff350286deb --- /dev/null +++ b/client/consensus/epochs/src/migration.rs @@ -0,0 +1,55 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Migration types for epoch changes. 
+ +use std::collections::BTreeMap; +use codec::{Encode, Decode}; +use fork_tree::ForkTree; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use crate::{Epoch, EpochChanges, PersistedEpoch, PersistedEpochHeader}; + +/// Legacy definition of epoch changes. +#[derive(Clone, Encode, Decode)] +pub struct EpochChangesV0 { + inner: ForkTree>, +} + +/// Type alias for legacy definition of epoch changes. +pub type EpochChangesForV0 = EpochChangesV0<::Hash, NumberFor, Epoch>; + +impl EpochChangesV0 where + Hash: PartialEq + Ord + Copy, + Number: Ord + Copy, +{ + /// Create a new value of this type from raw. + pub fn from_raw(inner: ForkTree>) -> Self { + Self { inner } + } + + /// Migrate the type into current epoch changes definition. + pub fn migrate(self) -> EpochChanges { + let mut epochs = BTreeMap::new(); + + let inner = self.inner.map(&mut |hash, number, data| { + let header = PersistedEpochHeader::from(&data); + epochs.insert((*hash, *number), data); + header + }); + + EpochChanges { inner, epochs } + } +} diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 11fee0e3f9e608c73e716f5ac8a84480140e31d1..b7e5f7b0b1a9ff7c3f14303fa8c0065aa572048d 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-manual-seal" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Manual sealing engine for Substrate" edition = "2018" @@ -10,27 +10,31 @@ repository = "https://github.com/paritytech/substrate/" [dependencies] derive_more = "0.99.2" -futures = "0.3.1" +futures = "0.3.4" jsonrpc-core = "14.0.5" jsonrpc-core-client = "14.0.5" jsonrpc-derive = "14.0.5" log = "0.4.8" parking_lot = "0.10.0" serde = { version = "1.0", features=["derive"] } +assert_matches = "1.3.0" -sc-client = { path = "../../../client" , version = "0.8.0-alpha.2"} -sc-client-api = { path = "../../../client/api" , version = "2.0.0-alpha.2"} -sc-transaction-pool = { path = "../../transaction-pool" , version = "2.0.0-alpha.2"} -sp-blockchain = { path = "../../../primitives/blockchain" , version = "2.0.0-alpha.2"} -sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common" , version = "0.8.0-alpha.2"} -sp-inherents = { path = "../../../primitives/inherents" , version = "2.0.0-alpha.2"} -sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-alpha.2"} -sp-transaction-pool = { path = "../../../primitives/transaction-pool" , version = "2.0.0-alpha.2"} +sc-client = { path = "../../../client" , version = "0.8.0-alpha.5"} +sc-client-api = { path = "../../../client/api" , version = "2.0.0-alpha.5"} +sc-transaction-pool = { path = "../../transaction-pool" , version = "2.0.0-alpha.5"} +sp-blockchain = { path = "../../../primitives/blockchain" , version = "2.0.0-alpha.5"} +sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common" , version = "0.8.0-alpha.5"} +sp-inherents = { path = "../../../primitives/inherents" , version = "2.0.0-alpha.5"} +sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-alpha.5"} +sp-transaction-pool = { path = "../../../primitives/transaction-pool" , version = "2.0.0-alpha.5"} [dev-dependencies] -sc-basic-authorship = { path = "../../basic-authorship" , version = "0.8.0-alpha.2"} +sc-basic-authorship = { path = "../../basic-authorship" , version = "0.8.0-alpha.5"} substrate-test-runtime-client = { path = 
"../../../test-utils/runtime/client" , version = "2.0.0-dev"} substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool" , version = "2.0.0-dev"} tokio = { version = "0.2", features = ["rt-core", "macros"] } env_logger = "0.7.0" tempfile = "3.1.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/consensus/manual-seal/src/finalize_block.rs b/client/consensus/manual-seal/src/finalize_block.rs index 419159709f2db84ae51cba0fb861458123b63332..5780a25f97256331eb1d11b2370dd838ec112ac7 100644 --- a/client/consensus/manual-seal/src/finalize_block.rs +++ b/client/consensus/manual-seal/src/finalize_block.rs @@ -23,42 +23,47 @@ use sp_runtime::{ generic::BlockId, }; use std::sync::Arc; -use sc_client_api::backend::Backend as ClientBackend; +use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; +use std::marker::PhantomData; /// params for block finalization. -pub struct FinalizeBlockParams { +pub struct FinalizeBlockParams { /// hash of the block pub hash: ::Hash, /// sender to report errors/success to the rpc. pub sender: rpc::Sender<()>, /// finalization justification pub justification: Option, - /// client backend - pub backend: Arc, + /// Finalizer trait object. + pub finalizer: Arc, + /// phantom type to pin the Backend type + pub _phantom: PhantomData, } + /// finalizes a block in the backend with the given params. -pub async fn finalize_block(params: FinalizeBlockParams) +pub async fn finalize_block(params: FinalizeBlockParams) where B: BlockT, + F: Finalizer, CB: ClientBackend, { let FinalizeBlockParams { hash, mut sender, justification, - backend: back_end, + finalizer, .. } = params; - match back_end.finalize_block(BlockId::Hash(hash), justification) { + match finalizer.finalize_block(BlockId::Hash(hash), justification, true) { Err(e) => { log::warn!("Failed to finalize block {:?}", e); rpc::send_result(&mut sender, Err(e.into())) } Ok(()) => { - log::info!("Successfully finalized block: {}", hash); + log::info!("✅ Successfully finalized block: {}", hash); rpc::send_result(&mut sender, Ok(())) } } -} \ No newline at end of file +} diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 18dc91ad34d625fd6b7ce661bea7a56aeebdcc38..8294ae049f65808c4797afc9e34f2e6b423b6b39 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -17,66 +17,32 @@ //! A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. //! This is suitable for a testing environment. 
+use futures::prelude::*; use sp_consensus::{ - self, BlockImport, Environment, Proposer, BlockCheckParams, - ForkChoiceStrategy, BlockImportParams, BlockOrigin, - ImportResult, SelectChain, - import_queue::{ - BasicQueue, - CacheKeyId, - Verifier, - BoxBlockImport, - }, + Environment, Proposer, ForkChoiceStrategy, BlockImportParams, BlockOrigin, SelectChain, + import_queue::{BasicQueue, CacheKeyId, Verifier, BoxBlockImport}, }; +use sp_blockchain::HeaderBackend; use sp_inherents::InherentDataProviders; use sp_runtime::{traits::Block as BlockT, Justification}; -use sc_client_api::backend::Backend as ClientBackend; -use futures::prelude::*; +use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; use sc_transaction_pool::txpool; -use std::collections::HashMap; -use std::sync::Arc; +use std::{sync::Arc, marker::PhantomData}; -pub mod rpc; mod error; mod finalize_block; mod seal_new_block; -use finalize_block::{finalize_block, FinalizeBlockParams}; -use seal_new_block::{seal_new_block, SealBlockParams}; -pub use error::Error; -pub use rpc::{EngineCommand, CreatedBlock}; - -/// The synchronous block-import worker of the engine. -pub struct ManualSealBlockImport { - inner: I, -} - -impl From for ManualSealBlockImport { - fn from(i: I) -> Self { - ManualSealBlockImport { inner: i } - } -} - -impl BlockImport for ManualSealBlockImport - where - B: BlockT, - I: BlockImport, -{ - type Error = I::Error; - type Transaction = (); - - fn check_block(&mut self, block: BlockCheckParams) -> Result - { - self.inner.check_block(block) - } +pub mod rpc; - fn import_block( - &mut self, - block: BlockImportParams, - cache: HashMap>, - ) -> Result { - self.inner.import_block(block, cache) - } -} +use self::{ + finalize_block::{finalize_block, FinalizeBlockParams}, + seal_new_block::{seal_new_block, SealBlockParams}, +}; +pub use self::{ + error::Error, + rpc::{EngineCommand, CreatedBlock}, +}; +use sc_client_api::{TransactionFor, Backend}; /// The verifier for the manual seal engine; instantly finalizes. struct ManualSealVerifier; @@ -92,7 +58,7 @@ impl Verifier for ManualSealVerifier { let mut import_params = BlockImportParams::new(origin, header); import_params.justification = justification; import_params.body = body; - import_params.finalized = true; + import_params.finalized = false; import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); Ok((import_params, None)) @@ -100,37 +66,43 @@ impl Verifier for ManualSealVerifier { } /// Instantiate the import queue for the manual seal consensus engine. -pub fn import_queue(block_import: BoxBlockImport) -> BasicQueue +pub fn import_queue( + block_import: BoxBlockImport> +) -> BasicQueue> + where + Block: BlockT, + B: Backend + 'static, { BasicQueue::new( ManualSealVerifier, - block_import, + Box::new(block_import), None, None, ) } /// Creates the background authorship task for the manual seal engine. 
-pub async fn run_manual_seal( +pub async fn run_manual_seal( mut block_import: BoxBlockImport, mut env: E, - backend: Arc, + client: Arc, pool: Arc>, - mut seal_block_channel: S, - select_chain: C, + mut commands_stream: S, + select_chain: SC, inherent_data_providers: InherentDataProviders, ) where + A: txpool::ChainApi::Hash> + 'static, B: BlockT + 'static, + C: HeaderBackend + Finalizer + 'static, CB: ClientBackend + 'static, E: Environment + 'static, E::Error: std::fmt::Display, >::Error: std::fmt::Display, - A: txpool::ChainApi::Hash> + 'static, S: Stream::Hash>> + Unpin + 'static, - C: SelectChain + 'static, + SC: SelectChain + 'static, { - while let Some(command) = seal_block_channel.next().await { + while let Some(command) = commands_stream.next().await { match command { EngineCommand::SealNewBlock { create_empty, @@ -149,7 +121,7 @@ pub async fn run_manual_seal( block_import: &mut block_import, inherent_data_provider: &inherent_data_providers, pool: pool.clone(), - backend: backend.clone(), + client: client.clone(), } ).await; } @@ -159,7 +131,8 @@ pub async fn run_manual_seal( hash, sender, justification, - backend: backend.clone(), + finalizer: client.clone(), + _phantom: PhantomData, } ).await } @@ -170,26 +143,28 @@ pub async fn run_manual_seal( /// runs the background authorship task for the instant seal engine. /// instant-seal creates a new block for every transaction imported into /// the transaction pool. -pub async fn run_instant_seal( +pub async fn run_instant_seal( block_import: BoxBlockImport, env: E, - backend: Arc, + client: Arc, pool: Arc>, - select_chain: C, + select_chain: SC, inherent_data_providers: InherentDataProviders, ) where A: txpool::ChainApi::Hash> + 'static, B: BlockT + 'static, + C: HeaderBackend + Finalizer + 'static, CB: ClientBackend + 'static, E: Environment + 'static, E::Error: std::fmt::Display, >::Error: std::fmt::Display, - C: SelectChain + 'static + SC: SelectChain + 'static { // instant-seal creates blocks as soon as transactions are imported // into the transaction pool. 
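`run_instant_seal` builds its `commands_stream` by mapping every transaction-pool import notification into an `EngineCommand::SealNewBlock { create_empty: false, .. }` and feeding the result to the same `run_manual_seal` loop. A minimal sketch of that wiring using only `futures` 0.3; `stream::iter` fakes `pool.validated_pool().import_notification_stream()`, and this `EngineCommand` is a simplified stand-in (the real variant also carries an rpc `Sender`).

```rust
// Minimal model of the instant-seal wiring: one SealNewBlock command per pool import
// notification, consumed by the same loop that serves manual-seal RPC commands.
use futures::{executor::block_on, stream, StreamExt};

#[derive(Debug)]
enum EngineCommand {
    SealNewBlock {
        create_empty: bool,
        finalize: bool,
        parent_hash: Option<[u8; 32]>,
    },
}

fn main() {
    block_on(async {
        // Pretend three transactions were imported into the pool.
        let import_notifications = stream::iter(vec![(), (), ()]);

        let mut commands_stream = import_notifications.map(|_| EngineCommand::SealNewBlock {
            create_empty: false,
            finalize: false,
            parent_hash: None,
        });

        // `run_manual_seal` would now drive this loop and seal one block per command.
        while let Some(command) = commands_stream.next().await {
            println!("sealing on {:?}", command);
        }
    });
}
```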
- let seal_block_channel = pool.validated_pool().import_notification_stream() + let commands_stream = pool.validated_pool() + .import_notification_stream() .map(|_| { EngineCommand::SealNewBlock { create_empty: false, @@ -202,9 +177,9 @@ pub async fn run_instant_seal( run_manual_seal( block_import, env, - backend, + client, pool, - seal_block_channel, + commands_stream, select_chain, inherent_data_providers, ).await @@ -224,11 +199,9 @@ mod tests { txpool::Options, }; use substrate_test_runtime_transaction_pool::{TestApi, uxt}; - use sp_transaction_pool::TransactionPool; + use sp_transaction_pool::{TransactionPool, MaintainedTransactionPool, TransactionSource}; use sp_runtime::generic::BlockId; - use sp_blockchain::HeaderBackend; use sp_consensus::ImportedAux; - use sc_client::LongestChain; use sp_inherents::InherentDataProviders; use sc_basic_authorship::ProposerFactory; @@ -236,12 +209,13 @@ mod tests { Arc::new(TestApi::empty()) } + const SOURCE: TransactionSource = TransactionSource::External; + #[tokio::test] async fn instant_seal() { let builder = TestClientBuilder::new(); - let backend = builder.backend(); - let client = Arc::new(builder.build()); - let select_chain = LongestChain::new(backend.clone()); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); let pool = Arc::new(BasicPool::new(Options::default(), api()).0); let env = ProposerFactory::new( @@ -266,7 +240,7 @@ mod tests { let future = run_manual_seal( Box::new(client.clone()), env, - backend.clone(), + client.clone(), pool.pool().clone(), stream, select_chain, @@ -278,7 +252,7 @@ mod tests { rt.block_on(future); }); // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), uxt(Alice, 0)).await; + let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); // assert that the background task returns ok @@ -298,15 +272,14 @@ mod tests { } ); // assert that there's a new block in the db. - assert!(backend.blockchain().header(BlockId::Number(1)).unwrap().is_some()) + assert!(client.header(&BlockId::Number(1)).unwrap().is_some()) } #[tokio::test] async fn manual_seal_and_finalization() { let builder = TestClientBuilder::new(); - let backend = builder.backend(); - let client = Arc::new(builder.build()); - let select_chain = LongestChain::new(backend.clone()); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); let pool = Arc::new(BasicPool::new(Options::default(), api()).0); let env = ProposerFactory::new( @@ -318,7 +291,7 @@ mod tests { let future = run_manual_seal( Box::new(client.clone()), env, - backend.clone(), + client.clone(), pool.pool().clone(), stream, select_chain, @@ -330,7 +303,7 @@ mod tests { rt.block_on(future); }); // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), uxt(Alice, 0)).await; + let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); let (tx, rx) = futures::channel::oneshot::channel(); @@ -358,7 +331,7 @@ mod tests { } ); // assert that there's a new block in the db. 
- let header = backend.blockchain().header(BlockId::Number(1)).unwrap().unwrap(); + let header = client.header(&BlockId::Number(1)).unwrap().unwrap(); let (tx, rx) = futures::channel::oneshot::channel(); sink.send(EngineCommand::FinalizeBlock { sender: Some(tx), @@ -372,9 +345,8 @@ mod tests { #[tokio::test] async fn manual_seal_fork_blocks() { let builder = TestClientBuilder::new(); - let backend = builder.backend(); - let client = Arc::new(builder.build()); - let select_chain = LongestChain::new(backend.clone()); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); let inherent_data_providers = InherentDataProviders::new(); let pool_api = api(); let pool = Arc::new(BasicPool::new(Options::default(), pool_api.clone()).0); @@ -387,7 +359,7 @@ mod tests { let future = run_manual_seal( Box::new(client.clone()), env, - backend.clone(), + client.clone(), pool.pool().clone(), stream, select_chain, @@ -399,7 +371,7 @@ mod tests { rt.block_on(future); }); // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), uxt(Alice, 0)).await; + let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; // assert that it was successfully imported assert!(result.is_ok()); @@ -429,21 +401,31 @@ mod tests { } ); // assert that there's a new block in the db. - assert!(backend.blockchain().header(BlockId::Number(0)).unwrap().is_some()); - assert!(pool.submit_one(&BlockId::Number(1), uxt(Alice, 1)).await.is_ok()); + assert!(client.header(&BlockId::Number(0)).unwrap().is_some()); + assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); + + pool.maintain(sp_transaction_pool::ChainEvent::NewBlock { + id: BlockId::Number(1), + header: client.header(&BlockId::Number(1)).expect("db error").expect("imported above"), + is_new_best: true, + retracted: vec![], + }).await; let (tx1, rx1) = futures::channel::oneshot::channel(); assert!(sink.send(EngineCommand::SealNewBlock { - parent_hash: Some(created_block.hash.clone()), + parent_hash: Some(created_block.hash), sender: Some(tx1), create_empty: false, finalize: false, }).await.is_ok()); - assert!(rx1.await.unwrap().is_ok()); - assert!(backend.blockchain().header(BlockId::Number(1)).unwrap().is_some()); + assert_matches::assert_matches!( + rx1.await.expect("should be no error receiving"), + Ok(_) + ); + assert!(client.header(&BlockId::Number(1)).unwrap().is_some()); pool_api.increment_nonce(Alice.into()); - assert!(pool.submit_one(&BlockId::Number(2), uxt(Alice, 2)).await.is_ok()); + assert!(pool.submit_one(&BlockId::Number(2), SOURCE, uxt(Alice, 2)).await.is_ok()); let (tx2, rx2) = futures::channel::oneshot::channel(); assert!(sink.send(EngineCommand::SealNewBlock { parent_hash: Some(created_block.hash), @@ -453,6 +435,6 @@ mod tests { }).await.is_ok()); let imported = rx2.await.unwrap().unwrap(); // assert that fork block is in the db - assert!(backend.blockchain().header(BlockId::Hash(imported.hash)).unwrap().is_some()) + assert!(client.header(&BlockId::Hash(imported.hash)).unwrap().is_some()) } } diff --git a/client/consensus/manual-seal/src/seal_new_block.rs b/client/consensus/manual-seal/src/seal_new_block.rs index 39d73e16ab74fdadde2e3a597fa2ef71615cc9fa..88b58ef4cc2b39cbbe3af1209e54408390e074ef 100644 --- a/client/consensus/manual-seal/src/seal_new_block.rs +++ b/client/consensus/manual-seal/src/seal_new_block.rs @@ -33,7 +33,6 @@ use sp_consensus::{ import_queue::BoxBlockImport, }; use sp_blockchain::HeaderBackend; -use 
sc_client_api::backend::Backend as ClientBackend; use std::collections::HashMap; use std::time::Duration; use sp_inherents::InherentDataProviders; @@ -42,7 +41,7 @@ use sp_inherents::InherentDataProviders; const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block -pub struct SealBlockParams<'a, B: BlockT, C, CB, E, T, P: txpool::ChainApi> { +pub struct SealBlockParams<'a, B: BlockT, SC, HB, E, T, P: txpool::ChainApi> { /// if true, empty blocks(without extrinsics) will be created. /// otherwise, will return Error::EmptyTransactionPool. pub create_empty: bool, @@ -54,12 +53,12 @@ pub struct SealBlockParams<'a, B: BlockT, C, CB, E, T, P: txpool::ChainApi> { pub sender: rpc::Sender::Hash>>, /// transaction pool pub pool: Arc>, - /// client backend - pub backend: Arc, + /// header backend + pub client: Arc, /// Environment trait object for creating a proposer pub env: &'a mut E, /// SelectChain object - pub select_chain: &'a C, + pub select_chain: &'a SC, /// block import object pub block_import: &'a mut BoxBlockImport, /// inherent data provider @@ -67,24 +66,24 @@ pub struct SealBlockParams<'a, B: BlockT, C, CB, E, T, P: txpool::ChainApi> { } /// seals a new block with the given params -pub async fn seal_new_block( +pub async fn seal_new_block( SealBlockParams { create_empty, finalize, pool, parent_hash, - backend: back_end, + client, select_chain, block_import, env, inherent_data_provider, mut sender, .. - }: SealBlockParams<'_, B, SC, CB, E, T, P> + }: SealBlockParams<'_, B, SC, HB, E, T, P> ) where B: BlockT, - CB: ClientBackend, + HB: HeaderBackend, E: Environment, >::Error: std::fmt::Display, >::Error: std::fmt::Display, @@ -101,7 +100,7 @@ pub async fn seal_new_block( // or fetch the best_block. let header = match parent_hash { Some(hash) => { - match back_end.blockchain().header(BlockId::Hash(hash))? { + match client.header(BlockId::Hash(hash))? 
{ Some(header) => header, None => return Err(Error::BlockNotFound(format!("{}", hash))), } diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index f95d196b62f7a94fec013416252456e50395b02f..c7832baae0ee4a2a27a35b88c4ec34f31e0854e5 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-pow" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "PoW consensus algorithm for substrate" edition = "2018" @@ -9,17 +9,20 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } -sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } -sp-consensus-pow = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/pow" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../api" } +sp-block-builder = { version = "2.0.0-alpha.5", path = "../../../primitives/block-builder" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../../primitives/inherents" } +sp-consensus-pow = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/pow" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } log = "0.4.8" futures = { version = "0.3.1", features = ["compat"] } -sp-timestamp = { version = "2.0.0-alpha.2", path = "../../../primitives/timestamp" } +sp-timestamp = { version = "2.0.0-alpha.5", path = "../../../primitives/timestamp" } derive_more = "0.99.2" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index 49d2e64f6050c92b8603e6a3cf034e19263085a1..de41ea7bd2356f02f5857344b70eae335273a540 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -151,7 +151,7 @@ pub trait PowAlgorithm { /// /// This function will be called twice during the import process, so the implementation /// should be properly cached. - fn difficulty(&self, parent: &BlockId) -> Result>; + fn difficulty(&self, parent: B::Hash) -> Result>; /// Verify that the seal is valid against given pre hash when parent block is not yet imported. /// /// None means that preliminary verify is not available for this algorithm. 
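[Editor's note: the hunk above changes `PowAlgorithm::difficulty` to take the parent hash directly instead of a `&BlockId`. A toy illustration of the new shape of that trait follows; names are illustrative stand-ins, not the real `sc-consensus-pow` API.]

```rust
// Illustrative stand-in: `difficulty` receives the parent hash directly,
// so implementations no longer need to resolve a block id first.
type Hash = [u8; 32];

trait PowAlgorithmLike {
    type Difficulty;
    fn difficulty(&self, parent: Hash) -> Result<Self::Difficulty, String>;
}

struct FixedDifficulty(u128);

impl PowAlgorithmLike for FixedDifficulty {
    type Difficulty = u128;
    fn difficulty(&self, _parent: Hash) -> Result<u128, String> {
        // A real algorithm would look up the stored difficulty for `parent`;
        // this toy version returns a constant.
        Ok(self.0)
    }
}

fn main() {
    let algo = FixedDifficulty(1_000);
    assert_eq!(algo.difficulty([0u8; 32]).unwrap(), 1_000);
}
```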
@@ -335,7 +335,7 @@ impl BlockImport for PowBlockImport difficulty, - None => self.algorithm.difficulty(&BlockId::hash(parent_hash))?, + None => self.algorithm.difficulty(parent_hash)?, }; let pre_hash = block.header.hash(); @@ -617,9 +617,7 @@ fn mine_loop( let (header, body) = proposal.block.deconstruct(); let (difficulty, seal) = { - let difficulty = algorithm.difficulty( - &BlockId::Hash(best_hash), - )?; + let difficulty = algorithm.difficulty(best_hash)?; loop { let seal = algorithm.mine( diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index fe7958b257203c9bf24e0582515e249d3a34a74c..bf973ef47a893bbd45a6bb47b03a9f5afbfd99df 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-slots" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Generic slots-based utilities for consensus" edition = "2018" @@ -10,20 +10,23 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../../telemetry" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } -futures = "0.3.1" +codec = { package = "parity-scale-codec", version = "1.3.0" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../api" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../../primitives/state-machine" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../../telemetry" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../../primitives/inherents" } +futures = "0.3.4" futures-timer = "3.0.1" parking_lot = "0.10.0" log = "0.4.8" [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index e23b2fb321a306243aa1bb24b7e580289f28045a..5952856bdafd960b06bb6d71b197b0f9982c3c55 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -94,7 +94,8 @@ pub trait SimpleSlotWorker { fn epoch_data(&self, header: &B::Header, slot_number: u64) -> Result; /// Returns the number of authorities given the epoch data. 
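[Editor's note: the slots hunk that follows changes `authorities_len` to return `Option<usize>`, so an incomplete authority set no longer makes the worker wait for the network. A minimal sketch of the reworked skip condition, as an illustrative free function rather than the real `SimpleSlotWorker` method:]

```rust
// Skip proposing only when authoring is not forced, the node looks offline,
// and the authority set is *known* to contain more than one member.
fn should_skip_slot(
    force_authoring: bool,
    is_offline: bool,
    authorities_len: Option<usize>,
) -> bool {
    !force_authoring && is_offline && authorities_len.map(|a| a > 1).unwrap_or(false)
}

fn main() {
    assert!(should_skip_slot(false, true, Some(4)));
    assert!(!should_skip_slot(false, true, None)); // unknown set: do not skip
    assert!(!should_skip_slot(true, true, Some(4))); // forced authoring always proposes
}
```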
- fn authorities_len(&self, epoch_data: &Self::EpochData) -> usize; + /// None indicate that the authorities information is incomplete. + fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option; /// Tries to claim the given slot, returning an object with claim data if successful. fn claim_slot( @@ -194,7 +195,10 @@ pub trait SimpleSlotWorker { let authorities_len = self.authorities_len(&epoch_data); - if !self.force_authoring() && self.sync_oracle().is_offline() && authorities_len > 1 { + if !self.force_authoring() && + self.sync_oracle().is_offline() && + authorities_len.map(|a| a > 1).unwrap_or(false) + { debug!(target: self.logging_target(), "Skipping proposal slot. Waiting for the network."); telemetry!( CONSENSUS_DEBUG; @@ -254,10 +258,10 @@ pub trait SimpleSlotWorker { Box::new(futures::future::select(proposing, delay).map(move |v| match v { futures::future::Either::Left((b, _)) => b.map(|b| (b, claim)), futures::future::Either::Right(_) => { - info!("Discarding proposal for slot {}; block production took too long", slot_number); + info!("⌛️ Discarding proposal for slot {}; block production took too long", slot_number); // If the node was compiled with debug, tell the user to use release optimizations. #[cfg(build_type="debug")] - info!("Recompile your node in `--release` mode to mitigate this problem."); + info!("👉 Recompile your node in `--release` mode to mitigate this problem."); telemetry!(CONSENSUS_INFO; "slots.discarding_proposal_took_too_long"; "slot" => slot_number, ); @@ -285,7 +289,7 @@ pub trait SimpleSlotWorker { ); info!( - "Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", + "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", header_num, block_import_params.post_hash(), header_hash, @@ -462,7 +466,7 @@ impl SlotDuration { cb(client.runtime_api(), &BlockId::number(Zero::zero()))?; info!( - "Loaded block-time = {:?} milliseconds from genesis on first-launch", + "⏱ Loaded block-time = {:?} milliseconds from genesis on first-launch", genesis_slot_duration ); diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml index ad325ed79f7a576bd957fd1a06fd5e473eb60394..7e8014199baa5241935cc198cce100bba9677cfa 100644 --- a/client/consensus/uncles/Cargo.toml +++ b/client/consensus/uncles/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-consensus-uncles" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Generic uncle inclusion utilities for consensus" edition = "2018" @@ -9,10 +9,13 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-authorship = { version = "2.0.0-alpha.2", path = "../../../primitives/authorship" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../api" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-authorship = { version = "2.0.0-alpha.5", path = "../../../primitives/authorship" } +sp-consensus = { version = "0.8.0-alpha.5", path = 
"../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../../primitives/inherents" } log = "0.4.8" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index d30b6f95e4253f6abc9447229a9099e08ef4a735..c791f253a9295d07b3112f8f9d7fdd9a81b036a7 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-client-db" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,35 +11,37 @@ description = "Client backend that uses RocksDB database as storage." [dependencies] parking_lot = "0.10.0" log = "0.4.8" -rand = "0.7" -kvdb = "0.4.0" -kvdb-rocksdb = { version = "0.6", optional = true } -kvdb-memorydb = "0.4.0" +kvdb = "0.5.0" +kvdb-rocksdb = { version = "0.7", optional = true } +kvdb-memorydb = "0.5.0" linked-hash-map = "0.5.2" hash-db = "0.15.2" -parity-util-mem = { version = "0.5.2", default-features = false, features = ["std"] } -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +parity-util-mem = { version = "0.6.0", default-features = false, features = ["std"] } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sc-client = { version = "0.8.0-alpha.2", path = "../" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } -sc-executor = { version = "0.8.0-alpha.2", path = "../executor" } -sc-state-db = { version = "0.8.0-alpha.2", path = "../state-db" } -sp-trie = { version = "2.0.0-alpha.2", path = "../../primitives/trie" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-alpha.2", path = "../../utils/prometheus" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sc-client = { version = "0.8.0-alpha.5", path = "../" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } +sc-executor = { version = "0.8.0-alpha.5", path = "../executor" } +sc-state-db = { version = "0.8.0-alpha.5", path = "../state-db" } +sp-trie = { version = "2.0.0-alpha.5", path = "../../primitives/trie" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-alpha.5", path = "../../utils/prometheus" } [dev-dependencies] -sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } env_logger = "0.7.0" quickcheck = "0.9" -kvdb-rocksdb = "0.6" +kvdb-rocksdb = "0.7" tempfile = "3" [features] default = [] test-helpers = [] + +[package.metadata.docs.rs] +targets = 
["x86_64-unknown-linux-gnu"] diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 05ec51f1c8d383bdbedb5ac346182235161300c3..ddac2109d7542590c66f405685a8c1f371d60ac0 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -17,9 +17,8 @@ //! State backend that's useful for benchmarking use std::sync::Arc; -use std::path::PathBuf; use std::cell::{Cell, RefCell}; -use rand::Rng; +use std::collections::HashMap; use hash_db::{Prefix, Hasher}; use sp_trie::{MemoryDB, prefixed_key}; @@ -28,12 +27,14 @@ use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; use sp_state_machine::{DBValue, backend::Backend as StateBackend}; use kvdb::{KeyValueDB, DBTransaction}; -use kvdb_rocksdb::{Database, DatabaseConfig}; +use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; type DbState = sp_state_machine::TrieBackend< Arc>>, HashFor >; +type State = CachingState, B>; + struct StorageDb { db: Arc, _block: std::marker::PhantomData, @@ -49,33 +50,30 @@ impl sp_state_machine::Storage> for StorageDb { - path: PathBuf, root: Cell, genesis_root: B::Hash, - state: RefCell>>, + state: RefCell>>, db: Cell>>, - genesis: as StateBackend>>::Transaction, + genesis: HashMap, (Vec, i32)>, + record: Cell>>, + shared_cache: SharedCache, // shared cache is always empty } impl BenchmarkingState { /// Create a new instance that creates a database in a temporary dir. - pub fn new(genesis: Storage) -> Result { - let temp_dir = PathBuf::from(std::env::temp_dir()); - let name: String = rand::thread_rng().sample_iter(&rand::distributions::Alphanumeric).take(10).collect(); - let path = temp_dir.join(&name); - + pub fn new(genesis: Storage, _cache_size_mb: Option) -> Result { let mut root = B::Hash::default(); let mut mdb = MemoryDB::>::default(); sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); - std::fs::create_dir(&path).map_err(|_| String::from("Error creating temp dir"))?; let mut state = BenchmarkingState { state: RefCell::new(None), db: Cell::new(None), - path, root: Cell::new(root), genesis: Default::default(), genesis_root: Default::default(), + record: Default::default(), + shared_cache: new_shared_cache(0, (1, 10)), }; state.reopen()?; @@ -88,41 +86,28 @@ impl BenchmarkingState { genesis.top.into_iter().map(|(k, v)| (k, Some(v))), child_delta, ); - state.genesis = transaction.clone(); + state.genesis = transaction.clone().drain(); state.genesis_root = root.clone(); state.commit(root, transaction)?; + state.record.take(); Ok(state) } fn reopen(&self) -> Result<(), String> { *self.state.borrow_mut() = None; - self.db.set(None); - let db_config = DatabaseConfig::with_columns(1); - let path = self.path.to_str() - .ok_or_else(|| String::from("Invalid database path"))?; - let db = Arc::new(Database::open(&db_config, &path).map_err(|e| format!("Error opening database: {:?}", e))?); + let db = match self.db.take() { + Some(db) => db, + None => Arc::new(::kvdb_memorydb::create(1)), + }; self.db.set(Some(db.clone())); let storage_db = Arc::new(StorageDb:: { db, _block: Default::default() }); - *self.state.borrow_mut() = Some(DbState::::new(storage_db, self.root.get())); + *self.state.borrow_mut() = Some(State::new( + DbState::::new(storage_db, self.root.get()), + self.shared_cache.clone(), + None + )); Ok(()) } - - fn kill(&self) -> Result<(), String> { - self.db.set(None); - *self.state.borrow_mut() = None; - let mut root = B::Hash::default(); - let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); - 
self.root.set(root); - - std::fs::remove_dir_all(&self.path).map_err(|_| "Error removing database dir".into()) - } -} - -impl Drop for BenchmarkingState { - fn drop(&mut self) { - self.kill().ok(); - } } fn state_err() -> String { @@ -257,16 +242,20 @@ impl StateBackend> for BenchmarkingState { { if let Some(db) = self.db.take() { let mut db_transaction = DBTransaction::new(); - - for (key, (val, rc)) in transaction.drain() { + let changes = transaction.drain(); + let mut keys = Vec::with_capacity(changes.len()); + for (key, (val, rc)) in changes { if rc > 0 { db_transaction.put(0, &key, &val); } else if rc < 0 { db_transaction.delete(0, &key); } + keys.push(key); } + self.record.set(keys); db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; self.root.set(storage_root); + self.db.set(Some(db)) } else { return Err("Trying to commit to a closed db".into()) } @@ -274,15 +263,36 @@ impl StateBackend> for BenchmarkingState { } fn wipe(&self) -> Result<(), Self::Error> { - self.kill()?; + // Restore to genesis + let record = self.record.take(); + if let Some(db) = self.db.take() { + let mut db_transaction = DBTransaction::new(); + for key in record { + match self.genesis.get(&key) { + Some((v, _)) => db_transaction.put(0, &key, v), + None => db_transaction.delete(0, &key), + } + } + db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; + self.db.set(Some(db)); + } + + self.root.set(self.genesis_root.clone()); self.reopen()?; - self.commit(self.genesis_root.clone(), self.genesis.clone())?; Ok(()) } + + fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { + self.state.borrow_mut().as_mut().map(|s| s.register_overlay_stats(stats)); + } + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) + } } impl std::fmt::Debug for BenchmarkingState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "DB at {:?}", self.path) + write!(f, "Bench DB") } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 4494978aeef4012e2cb2b1275e23f60127c2de50..70f666aebf0e526157605ed974cef487f031a088 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -47,7 +47,7 @@ use std::io; use std::collections::HashMap; use sc_client_api::{ - ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo, MemorySize, + ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo, MemorySize, CloneableSpawn, execution_extensions::ExecutionExtensions, backend::{NewBlockState, PrunableStateChangesTrieStorage}, }; @@ -73,7 +73,7 @@ use sc_executor::RuntimeInfo; use sp_state_machine::{ DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, StorageCollection, ChildStorageCollection, - backend::Backend as StateBackend, + backend::Backend as StateBackend, StateMachineStats, }; use crate::utils::{DatabaseType, Meta, db_err, meta_keys, read_db, read_meta}; use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; @@ -256,6 +256,14 @@ impl StateBackend> for RefTrackingState { { self.state.as_trie_backend() } + + fn register_overlay_stats(&mut self, stats: &StateMachineStats) { + self.state.register_overlay_stats(stats); + } + + fn usage_info(&self) -> StateUsageInfo { + self.state.usage_info() + } } /// Database settings. @@ -276,8 +284,8 @@ pub enum DatabaseSettingsSrc { Path { /// Path to the database. path: PathBuf, - /// Cache size in bytes. 
If `None` default is used. - cache_size: Option, + /// Cache size in MiB. + cache_size: usize, }, /// Use a custom already-open database. @@ -292,6 +300,7 @@ pub fn new_client( fork_blocks: ForkBlocks, bad_blocks: BadBlocks, execution_extensions: ExecutionExtensions, + spawn_handle: Box, prometheus_registry: Option, ) -> Result<( sc_client::Client< @@ -309,7 +318,7 @@ pub fn new_client( E: CodeExecutor + RuntimeInfo, { let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?); - let executor = sc_client::LocalCallExecutor::new(backend.clone(), executor); + let executor = sc_client::LocalCallExecutor::new(backend.clone(), executor, spawn_handle); Ok(( sc_client::Client::new( backend.clone(), @@ -416,6 +425,7 @@ impl sc_client::blockchain::HeaderBackend for BlockchainDb genesis_hash: meta.genesis_hash, finalized_hash: meta.finalized_hash, finalized_number: meta.finalized_number, + number_leaves: self.leaves.read().count(), } } @@ -1115,6 +1125,8 @@ impl Backend { let mut changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); let mut ops: u64 = 0; let mut bytes: u64 = 0; + let mut removal: u64 = 0; + let mut bytes_removal: u64 = 0; for (key, (val, rc)) in operation.db_updates.drain() { if rc > 0 { ops += 1; @@ -1122,14 +1134,26 @@ impl Backend { changeset.inserted.push((key, val.to_vec())); } else if rc < 0 { - ops += 1; - bytes += key.len() as u64; + removal += 1; + bytes_removal += key.len() as u64; changeset.deleted.push(key); } } - self.state_usage.tally_writes(ops, bytes); + self.state_usage.tally_writes_nodes(ops, bytes); + self.state_usage.tally_removed_nodes(removal, bytes_removal); + let mut ops: u64 = 0; + let mut bytes: u64 = 0; + for (key, value) in operation.storage_updates.iter() + .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) { + ops += 1; + bytes += key.len() as u64; + if let Some(v) = value.as_ref() { + bytes += v.len() as u64; + } + } + self.state_usage.tally_writes(ops, bytes); let number_u64 = number.saturated_into::(); let commit = self.storage.state_db.insert_block( &hash, @@ -1497,8 +1521,10 @@ impl sc_client_api::backend::Backend for Backend { reads: io_stats.reads, average_transaction_size: io_stats.avg_transaction_size() as u64, state_reads: state_stats.reads.ops, - state_reads_cache: state_stats.cache_reads.ops, state_writes: state_stats.writes.ops, + state_writes_cache: state_stats.overlay_writes.ops, + state_reads_cache: state_stats.cache_reads.ops, + state_writes_nodes: state_stats.nodes_writes.ops, }, }) } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index cda1a1195268e74893b6a18440f23d7d1495d971..e3dcdedd5096d533dbe4c0da4452e47161e3b866 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -153,6 +153,7 @@ impl BlockchainHeaderBackend for LightStorage genesis_hash: meta.genesis_hash, finalized_hash: meta.finalized_hash, finalized_number: meta.finalized_number, + number_leaves: 1, } } @@ -592,8 +593,10 @@ impl LightBlockchainStorage for LightStorage average_transaction_size: io_stats.avg_transaction_size() as u64, // Light client does not track those state_reads: 0, - state_reads_cache: 0, state_writes: 0, + state_reads_cache: 0, + state_writes_cache: 0, + state_writes_nodes: 0, } }) } @@ -960,7 +963,7 @@ pub(crate) mod tests { fn run_checks(db: &LightStorage, max: u64, checks: &[(u64, Option>)]) { for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) { - let actual = get_authorities(db.cache(), BlockId::Number(*at)); + let actual = authorities(db.cache(), 
BlockId::Number(*at)); assert_eq!(*expected, actual); } } @@ -975,7 +978,7 @@ pub(crate) mod tests { map } - fn get_authorities(cache: &dyn BlockchainCache, at: BlockId) -> Option> { + fn authorities(cache: &dyn BlockchainCache, at: BlockId) -> Option> { cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).unwrap_or(None) .and_then(|(_, _, val)| Decode::decode(&mut &val[..]).ok()) } @@ -1023,9 +1026,9 @@ pub(crate) mod tests { // ... -> B2(1) -> B2_1(1) -> B2_2(2) // => the cache ignores all writes before best finalized block let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || default_header(&hash2, 3)); - assert_eq!(None, get_authorities(db.cache(), BlockId::Hash(hash2_1))); + assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_1))); let hash2_2 = insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash2_1, 4)); - assert_eq!(None, get_authorities(db.cache(), BlockId::Hash(hash2_2))); + assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_2))); } let (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) = { @@ -1037,55 +1040,55 @@ pub(crate) mod tests { let hash7 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash6, 7)); assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), + authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]), ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); let hash8 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash7, 8)); assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), + authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]), ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); let hash6_1 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6, 7)); assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), + authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]), ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); let hash6_1_1 = insert_non_best_block(&db, make_authorities(vec![auth5()]), || default_header(&hash6_1, 8)); assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), + authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]), ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), 
Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || default_header(&hash6_1, 8)); assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), + authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]), ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); let hash6_2 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6_1, 8)); assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), + authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]), ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) }; @@ -1094,27 +1097,27 @@ pub(crate) mod tests { // finalize block hash6_1 db.finalize_header(BlockId::Hash(hash6_1)).unwrap(); assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), + authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]), ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); + assert_eq!(authorities(db.cache(), 
BlockId::Hash(hash7)), None); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); // finalize block hash6_2 db.finalize_header(BlockId::Hash(hash6_2)).unwrap(); assert_eq!( - get_authorities(db.cache(), BlockId::Hash(hash6)), + authorities(db.cache(), BlockId::Hash(hash6)), Some(vec![auth1(), auth2()]), ); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash7)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_1)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_1_2)), None); - assert_eq!(get_authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), None); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), None); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); } } diff --git a/client/db/src/stats.rs b/client/db/src/stats.rs index 1d6ed8e7f0493c9adf7d516bfacd0467c231dae7..8bc93b5b644cf985ff81fa0aaa0c0154c64eeaa0 100644 --- a/client/db/src/stats.rs +++ b/client/db/src/stats.rs @@ -25,6 +25,10 @@ pub struct StateUsageStats { bytes_read: AtomicU64, writes: AtomicU64, bytes_written: AtomicU64, + writes_nodes: AtomicU64, + bytes_written_nodes: AtomicU64, + removed_nodes: AtomicU64, + bytes_removed_nodes: AtomicU64, reads_cache: AtomicU64, bytes_read_cache: AtomicU64, } @@ -38,6 +42,10 @@ impl StateUsageStats { bytes_read: 0.into(), writes: 0.into(), bytes_written: 0.into(), + writes_nodes: 0.into(), + bytes_written_nodes: 0.into(), + removed_nodes: 0.into(), + bytes_removed_nodes: 0.into(), reads_cache: 0.into(), bytes_read_cache: 0.into(), } @@ -70,7 +78,19 @@ impl StateUsageStats { val } - /// Tally some write operations, including their byte count. + /// Tally some write trie nodes operations, including their byte count. + pub fn tally_writes_nodes(&self, ops: u64, data_bytes: u64) { + self.writes_nodes.fetch_add(ops, AtomicOrdering::Relaxed); + self.bytes_written_nodes.fetch_add(data_bytes, AtomicOrdering::Relaxed); + } + + /// Tally some removed trie nodes operations, including their byte count. + pub fn tally_removed_nodes(&self, ops: u64, data_bytes: u64) { + self.removed_nodes.fetch_add(ops, AtomicOrdering::Relaxed); + self.bytes_removed_nodes.fetch_add(data_bytes, AtomicOrdering::Relaxed); + } + + /// Tally some write trie nodes operations, including their byte count. 
pub fn tally_writes(&self, ops: u64, data_bytes: u64) { self.writes.fetch_add(ops, AtomicOrdering::Relaxed); self.bytes_written.fetch_add(data_bytes, AtomicOrdering::Relaxed); @@ -80,8 +100,10 @@ impl StateUsageStats { pub fn merge_sm(&self, info: sp_state_machine::UsageInfo) { self.reads.fetch_add(info.reads.ops, AtomicOrdering::Relaxed); self.bytes_read.fetch_add(info.reads.bytes, AtomicOrdering::Relaxed); - self.writes.fetch_add(info.writes.ops, AtomicOrdering::Relaxed); - self.bytes_written.fetch_add(info.writes.bytes, AtomicOrdering::Relaxed); + self.writes_nodes.fetch_add(info.nodes_writes.ops, AtomicOrdering::Relaxed); + self.bytes_written_nodes.fetch_add(info.nodes_writes.bytes, AtomicOrdering::Relaxed); + self.removed_nodes.fetch_add(info.removed_nodes.ops, AtomicOrdering::Relaxed); + self.bytes_removed_nodes.fetch_add(info.removed_nodes.bytes, AtomicOrdering::Relaxed); self.reads_cache.fetch_add(info.cache_reads.ops, AtomicOrdering::Relaxed); self.bytes_read_cache.fetch_add(info.cache_reads.bytes, AtomicOrdering::Relaxed); } @@ -100,7 +122,11 @@ impl StateUsageStats { sp_state_machine::UsageInfo { reads: unit(&self.reads, &self.bytes_read), writes: unit(&self.writes, &self.bytes_written), + nodes_writes: unit(&self.writes_nodes, &self.bytes_written_nodes), + removed_nodes: unit(&self.removed_nodes, &self.bytes_removed_nodes), cache_reads: unit(&self.reads_cache, &self.bytes_read_cache), + modified_reads: Default::default(), + overlay_writes: Default::default(), // TODO: Proper tracking state of memory footprint here requires // imposing `MallocSizeOf` requirement on half of the codebase, // so it is an open question how to do it better diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 2ac1ee3dbd5f4af37f6fd21d72a55ee7a5748d1d..63268992632410934d0518039882240dee328c0d 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -299,6 +299,8 @@ pub struct CacheChanges { pub struct CachingState { /// Usage statistics usage: StateUsageStats, + /// State machine registered stats + overlay_stats: sp_state_machine::StateMachineStats, /// Backing state. state: S, /// Cache data. 
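[Editor's note: the `stats.rs` changes in this diff split state-write accounting into top-level value writes, trie-node writes, and trie-node removals, all tallied with relaxed atomics. A simplified, std-only sketch of that counter layout; the struct below is a stand-in, not the real `StateUsageStats`.]

```rust
use std::sync::atomic::{AtomicU64, Ordering};

#[derive(Default)]
struct WriteStats {
    writes: AtomicU64,
    bytes_written: AtomicU64,
    writes_nodes: AtomicU64,
    bytes_written_nodes: AtomicU64,
    removed_nodes: AtomicU64,
    bytes_removed_nodes: AtomicU64,
}

impl WriteStats {
    /// Tally top-level value writes.
    fn tally_writes(&self, ops: u64, bytes: u64) {
        self.writes.fetch_add(ops, Ordering::Relaxed);
        self.bytes_written.fetch_add(bytes, Ordering::Relaxed);
    }

    /// Tally written trie nodes.
    fn tally_writes_nodes(&self, ops: u64, bytes: u64) {
        self.writes_nodes.fetch_add(ops, Ordering::Relaxed);
        self.bytes_written_nodes.fetch_add(bytes, Ordering::Relaxed);
    }

    /// Tally removed trie nodes.
    fn tally_removed_nodes(&self, ops: u64, bytes: u64) {
        self.removed_nodes.fetch_add(ops, Ordering::Relaxed);
        self.bytes_removed_nodes.fetch_add(bytes, Ordering::Relaxed);
    }
}

fn main() {
    let stats = WriteStats::default();
    stats.tally_writes_nodes(3, 96);
    stats.tally_removed_nodes(1, 32);
    stats.tally_writes(2, 128);
    assert_eq!(stats.writes_nodes.load(Ordering::Relaxed), 3);
}
```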
@@ -428,6 +430,7 @@ impl>, B: BlockT> CachingState { ) -> Self { CachingState { usage: StateUsageStats::new(), + overlay_stats: sp_state_machine::StateMachineStats::default(), state, cache: CacheChanges { shared_cache, @@ -663,8 +666,14 @@ impl>, B: BlockT> StateBackend> for Cachin self.state.as_trie_backend() } + fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { + self.overlay_stats.add(stats); + } + fn usage_info(&self) -> sp_state_machine::UsageInfo { - self.usage.take() + let mut info = self.usage.take(); + info.include_state_machine_states(&self.overlay_stats); + info } } @@ -852,6 +861,10 @@ impl>, B: BlockT> StateBackend> for Syncin .as_trie_backend() } + fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { + self.caching_state().register_overlay_stats(stats); + } + fn usage_info(&self) -> sp_state_machine::UsageInfo { self.caching_state().usage_info() } diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 971acf8456b6463fe315768f16e2f8cf3af8254b..5e6794108ecfe87a1928f83a7939f81acac6d2e2 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -166,7 +166,7 @@ mod tests { state_cache_size: 0, state_cache_child_ratio: None, pruning: PruningMode::ArchiveAll, - source: DatabaseSettingsSrc::Path { path: db_path.to_owned(), cache_size: None }, + source: DatabaseSettingsSrc::Path { path: db_path.to_owned(), cache_size: 128 }, }, DatabaseType::Full).map(|_| ()) } diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index f26714eb5a7dbf9edae0fcac6a850e877478fd0e..16239a82c2e24508e172da3c0c8c90e646061556 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -227,24 +227,31 @@ pub fn open_database( // and now open database assuming that it has the latest version let mut db_config = DatabaseConfig::with_columns(NUM_COLUMNS); + let state_col_budget = (*cache_size as f64 * 0.9) as usize; + let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); + let mut memory_budget = std::collections::HashMap::new(); + let path = path.to_str() + .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; - if let Some(cache_size) = cache_size { - let state_col_budget = (*cache_size as f64 * 0.9) as usize; - let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); - - let mut memory_budget = std::collections::HashMap::new(); - for i in 0..NUM_COLUMNS { - if i == crate::columns::STATE { - memory_budget.insert(i, state_col_budget); - } else { - memory_budget.insert(i, other_col_budget); - } + for i in 0..NUM_COLUMNS { + if i == crate::columns::STATE { + memory_budget.insert(i, state_col_budget); + } else { + memory_budget.insert(i, other_col_budget); } - - db_config.memory_budget = memory_budget; } - let path = path.to_str() - .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; + + db_config.memory_budget = memory_budget; + + log::trace!( + target: "db", + "Open database at {}, state column budget: {} MiB, others({}) column cache: {} MiB", + path, + state_col_budget, + NUM_COLUMNS, + other_col_budget, + ); + Arc::new(Database::open(&db_config, &path).map_err(db_err)?) 
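[Editor's note: with `cache_size` now a plain MiB value, `open_database` always builds a per-column memory budget: roughly 90% goes to the state column and the remainder is split evenly across the other columns. A small sketch of that arithmetic; the column count and state-column index below are assumptions for illustration, not the crate's real constants.]

```rust
use std::collections::HashMap;

const NUM_COLUMNS: u32 = 11; // assumed for illustration; the real value lives in the crate
const STATE_COLUMN: u32 = 1; // assumed for illustration

fn memory_budget(cache_size_mib: usize) -> HashMap<u32, usize> {
    // ~90% of the configured cache goes to the state column.
    let state_col_budget = (cache_size_mib as f64 * 0.9) as usize;
    // The rest is divided evenly among the remaining columns.
    let other_col_budget = (cache_size_mib - state_col_budget) / (NUM_COLUMNS as usize - 1);

    (0..NUM_COLUMNS)
        .map(|i| {
            if i == STATE_COLUMN {
                (i, state_col_budget)
            } else {
                (i, other_col_budget)
            }
        })
        .collect()
}

fn main() {
    let budget = memory_budget(128);
    assert_eq!(budget[&STATE_COLUMN], 115); // 90% of 128 MiB
    assert_eq!(budget[&0], 1); // (128 - 115) MiB shared by the other 10 columns
}
```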
}, #[cfg(not(any(feature = "kvdb-rocksdb", test)))] diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index 1353bc57307b9c5d664b005f7d1bcbb997ca093f..cae0d56d8ed5eea8fef0e77c92eec6c2d5d9a61b 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,22 +11,22 @@ documentation = "https://docs.rs/sc-executor" [dependencies] derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.2.0" } -sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-trie = { version = "2.0.0-alpha.2", path = "../../primitives/trie" } -sp-serializer = { version = "2.0.0-alpha.2", path = "../../primitives/serializer" } -sp-version = { version = "2.0.0-alpha.2", path = "../../primitives/version" } -sp-panic-handler = { version = "2.0.0-alpha.2", path = "../../primitives/panic-handler" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +sp-io = { version = "2.0.0-alpha.5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-trie = { version = "2.0.0-alpha.5", path = "../../primitives/trie" } +sp-serializer = { version = "2.0.0-alpha.5", path = "../../primitives/serializer" } +sp-version = { version = "2.0.0-alpha.5", path = "../../primitives/version" } +sp-panic-handler = { version = "2.0.0-alpha.5", path = "../../primitives/panic-handler" } wasmi = "0.6.2" parity-wasm = "0.41.0" lazy_static = "1.4.0" -sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../primitives/runtime-interface" } -sp-externalities = { version = "0.8.0-alpha.2", path = "../../primitives/externalities" } -sc-executor-common = { version = "0.8.0-alpha.2", path = "common" } -sc-executor-wasmi = { version = "0.8.0-alpha.2", path = "wasmi" } -sc-executor-wasmtime = { version = "0.8.0-alpha.2", path = "wasmtime", optional = true } +sp-wasm-interface = { version = "2.0.0-alpha.5", path = "../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.5", path = "../../primitives/runtime-interface" } +sp-externalities = { version = "0.8.0-alpha.5", path = "../../primitives/externalities" } +sc-executor-common = { version = "0.8.0-alpha.5", path = "common" } +sc-executor-wasmi = { version = "0.8.0-alpha.5", path = "wasmi" } +sc-executor-wasmtime = { version = "0.8.0-alpha.5", path = "wasmtime", optional = true } parking_lot = "0.10.0" log = "0.4.8" libsecp256k1 = "0.3.4" @@ -37,9 +37,9 @@ wabt = "0.9.2" hex-literal = "0.2.1" sc-runtime-test = { version = "2.0.0-dev", path = "runtime-test" } substrate-test-runtime = { version = "2.0.0-dev", path = "../../test-utils/runtime" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } test-case = "0.3.3" -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } [features] default = [ "std" ] @@ -52,3 +52,6 @@ wasmtime = [ wasmi-errno = [ "wasmi/errno" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/executor/common/Cargo.toml 
b/client/executor/common/Cargo.toml index 04db56938a4bc1105b140b8f1d74207a2c5d3395..f9ce7d4e399c52af6c42fbfb845857861a4ffe85 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-common" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,13 +12,17 @@ documentation = "https://docs.rs/sc-executor-common/" [dependencies] log = "0.4.8" derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.2.0" } +parity-wasm = "0.41.0" +codec = { package = "parity-scale-codec", version = "1.3.0" } wasmi = "0.6.2" -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-alpha.2", path = "../../../primitives/allocator" } -sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime-interface" } -sp-serializer = { version = "2.0.0-alpha.2", path = "../../../primitives/serializer" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-alpha.5", path = "../../../primitives/allocator" } +sp-wasm-interface = { version = "2.0.0-alpha.5", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime-interface" } +sp-serializer = { version = "2.0.0-alpha.5", path = "../../../primitives/serializer" } [features] default = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/executor/common/src/lib.rs b/client/executor/common/src/lib.rs index cc515dcf9dab9a8f852f2d9b2dc1f3e2af40773f..7f3864e6152fb2a00122bc6e6c2d2e9c2a4102e8 100644 --- a/client/executor/common/src/lib.rs +++ b/client/executor/common/src/lib.rs @@ -18,6 +18,7 @@ #![warn(missing_docs)] -pub mod sandbox; pub mod error; +pub mod sandbox; +pub mod util; pub mod wasm_runtime; diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/util.rs new file mode 100644 index 0000000000000000000000000000000000000000..149db13bc0768a92408f8b1af1a05d518a8ae97d --- /dev/null +++ b/client/executor/common/src/util.rs @@ -0,0 +1,138 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! A set of utilities for resetting a wasm instance to its initial state. + +use crate::error::{self, Error}; +use std::mem; +use parity_wasm::elements::{deserialize_buffer, DataSegment, Instruction, Module as RawModule}; + +/// A bunch of information collected from a WebAssembly module. +pub struct WasmModuleInfo { + raw_module: RawModule, +} + +impl WasmModuleInfo { + /// Create `WasmModuleInfo` from the given wasm code. + /// + /// Returns `None` if the wasm code cannot be deserialized. 
+ pub fn new(wasm_code: &[u8]) -> Option { + let raw_module: RawModule = deserialize_buffer(wasm_code).ok()?; + Some(Self { raw_module }) + } + + /// Extract the data segments from the given wasm code. + /// + /// Returns `Err` if the given wasm code cannot be deserialized. + fn data_segments(&self) -> Vec { + self.raw_module + .data_section() + .map(|ds| ds.entries()) + .unwrap_or(&[]) + .to_vec() + } + + /// The number of globals defined in locally in this module. + pub fn declared_globals_count(&self) -> u32 { + self.raw_module + .global_section() + .map(|gs| gs.entries().len() as u32) + .unwrap_or(0) + } + + /// The number of imports of globals. + pub fn imported_globals_count(&self) -> u32 { + self.raw_module + .import_section() + .map(|is| is.globals() as u32) + .unwrap_or(0) + } +} + +/// This is a snapshot of data segments specialzied for a particular instantiation. +/// +/// Note that this assumes that no mutable globals are used. +#[derive(Clone)] +pub struct DataSegmentsSnapshot { + /// The list of data segments represented by (offset, contents). + data_segments: Vec<(u32, Vec)>, +} + +impl DataSegmentsSnapshot { + /// Create a snapshot from the data segments from the module. + pub fn take(module: &WasmModuleInfo) -> error::Result { + let data_segments = module + .data_segments() + .into_iter() + .map(|mut segment| { + // Just replace contents of the segment since the segments will be discarded later + // anyway. + let contents = mem::replace(segment.value_mut(), vec![]); + + let init_expr = match segment.offset() { + Some(offset) => offset.code(), + // Return if the segment is passive + None => return Err(Error::from("Shared memory is not supported".to_string())), + }; + + // [op, End] + if init_expr.len() != 2 { + return Err(Error::from( + "initializer expression can have only up to 2 expressions in wasm 1.0" + .to_string(), + )); + } + let offset = match &init_expr[0] { + Instruction::I32Const(v) => *v as u32, + Instruction::GetGlobal(_) => { + // In a valid wasm file, initializer expressions can only refer imported + // globals. + // + // At the moment of writing the Substrate Runtime Interface does not provide + // any globals. There is nothing that prevents us from supporting this + // if/when we gain those. + return Err(Error::from( + "Imported globals are not supported yet".to_string(), + )); + } + insn => { + return Err(Error::from(format!( + "{:?} is not supported as initializer expression in wasm 1.0", + insn + ))) + } + }; + + Ok((offset, contents)) + }) + .collect::>>()?; + + Ok(Self { data_segments }) + } + + /// Apply the given snapshot to a linear memory. + /// + /// Linear memory interface is represented by a closure `memory_set`. 
+ pub fn apply( + &self, + mut memory_set: impl FnMut(u32, &[u8]) -> Result<(), E>, + ) -> Result<(), E> { + for (offset, contents) in &self.data_segments { + memory_set(*offset, contents)?; + } + Ok(()) + } +} diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index ad7c44718d243433ae6e2e775d29b213d091ca3a..e50061f4f24bb71639a5204cf326d6be7d397e6f 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -10,12 +10,12 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/io" } -sp-sandbox = { version = "0.8.0-alpha.2", default-features = false, path = "../../../primitives/sandbox" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } -sp-allocator = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/allocator" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/io" } +sp-sandbox = { version = "0.8.0-alpha.5", default-features = false, path = "../../../primitives/sandbox" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/runtime" } +sp-allocator = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/allocator" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } @@ -28,3 +28,6 @@ std = [ "sp-std/std", "sp-allocator/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 5641755ef18ba10b1312faba4ec3730343483935..778bc808004bb2470b528c802bd14c1d0fe1bc51 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -483,5 +483,7 @@ mod tests { 2, ); }); + + my_interface::say_hello_world("hey"); } } diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index e6c16453f365fb560f577bcda600edbbb586845a..7a369cc470ab55c4413dc3c3cc3beb7604a45068 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -40,6 +40,12 @@ pub enum WasmExecutionMethod { Compiled, } +impl Default for WasmExecutionMethod { + fn default() -> WasmExecutionMethod { + WasmExecutionMethod::Interpreted + } +} + /// A Wasm runtime object along with its cached runtime version. struct VersionedRuntime { /// Runtime code hash. 
diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index aa259e29f8dfb3dfb564b3b8060af8f238f537e6..fe5bd70d00a750d4d65ff0b7e949adcdd762a046 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -1,21 +1,23 @@ [package] name = "sc-executor-wasmi" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" description = "This crate provides an implementation of `WasmRuntime` that is baked by wasmi." -documentation = "https://docs.rs/sc-execturo-wasmi" +documentation = "https://docs.rs/sc-executor-wasmi" [dependencies] log = "0.4.8" wasmi = "0.6.2" -parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.2.0" } -sc-executor-common = { version = "0.8.0-alpha.2", path = "../common" } -sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-alpha.2", path = "../../../primitives/allocator" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +sc-executor-common = { version = "0.8.0-alpha.5", path = "../common" } +sp-wasm-interface = { version = "2.0.0-alpha.5", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-alpha.5", path = "../../../primitives/allocator" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index 6348c2413357f03f9de161d4ff537cf8a77e818d..e4b4aca40967d982fe342985f829bf2c4c190e39 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -16,21 +16,25 @@ //! This crate provides an implementation of `WasmModule` that is baked by wasmi. -use sc_executor_common::{error::{Error, WasmError}, sandbox}; -use std::{str, mem, cell::RefCell, sync::Arc}; +use std::{str, cell::RefCell, sync::Arc}; use wasmi::{ Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, ModuleRef, - memory_units::Pages, RuntimeValue::{I32, I64, self}, + memory_units::Pages, + RuntimeValue::{I32, I64, self}, }; use codec::{Encode, Decode}; use sp_core::sandbox as sandbox_primitives; use log::{error, trace, debug}; -use parity_wasm::elements::{deserialize_buffer, DataSegment, Instruction, Module as RawModule}; use sp_wasm_interface::{ FunctionContext, Pointer, WordSize, Sandbox, MemoryId, Result as WResult, Function, }; use sp_runtime_interface::unpack_ptr_and_len; use sc_executor_common::wasm_runtime::{WasmModule, WasmInstance}; +use sc_executor_common::{ + error::{Error, WasmError}, + sandbox, +}; +use sc_executor_common::util::{DataSegmentsSnapshot, WasmModuleInfo}; struct FunctionExecutor<'a> { sandbox_store: sandbox::Store, @@ -530,52 +534,14 @@ fn instantiate_module( /// /// It is used for restoring the state of the module after execution. 
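+/// A rough sketch of the intended take/apply cycle, using the methods defined below
+/// (`module_instance` is assumed to be a `wasmi::ModuleRef`):
+///
+/// ```ignore
+/// let globals = GlobalValsSnapshot::take(&module_instance);
+/// // ... make a call into the instance that may mutate its globals ...
+/// globals.apply(&module_instance)?;
+/// ```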
#[derive(Clone)]
-struct StateSnapshot {
- /// The offset and the content of the memory segments that should be used to restore the snapshot
- data_segments: Vec<(u32, Vec<u8>)>,
+struct GlobalValsSnapshot {
/// The list of all global mutable variables of the module in their sequential order.
global_mut_values: Vec<RuntimeValue>,
}
-impl StateSnapshot {
+impl GlobalValsSnapshot {
// Returns `None` if instance is not valid.
- fn take(
- module_instance: &ModuleRef,
- data_segments: Vec<DataSegment>,
- ) -> Option<Self> {
- let prepared_segments = data_segments
- .into_iter()
- .map(|mut segment| {
- // Just replace contents of the segment since the segments will be discarded later
- // anyway.
- let contents = mem::replace(segment.value_mut(), vec![]);
-
- let init_expr = match segment.offset() {
- Some(offset) => offset.code(),
- // Return if the segment is passive
- None => return None
- };
-
- // [op, End]
- if init_expr.len() != 2 {
- return None;
- }
- let offset = match init_expr[0] {
- Instruction::I32Const(v) => v as u32,
- Instruction::GetGlobal(idx) => {
- let global_val = module_instance.globals().get(idx as usize)?.get();
- match global_val {
- RuntimeValue::I32(v) => v as u32,
- _ => return None,
- }
- }
- _ => return None,
- };
-
- Some((offset, contents))
- })
- .collect::<Option<Vec<_>>>()?;
-
+ fn take(module_instance: &ModuleRef) -> Self {
// Collect all values of mutable globals.
let global_mut_values = module_instance
.globals()
@@ -583,42 +549,27 @@ impl StateSnapshot {
.filter(|g| g.is_mutable())
.map(|g| g.get())
.collect();
-
- Some(Self {
- data_segments: prepared_segments,
- global_mut_values,
- })
+ Self { global_mut_values }
}
/// Reset the runtime instance to the initial version by restoring
/// the preserved memory and globals.
///
/// Returns `Err` if applying the snapshot fails.
- fn apply(&self, instance: &ModuleRef, memory: &MemoryRef) -> Result<(), WasmError> {
- // First, erase the memory and copy the data segments into it.
- memory
- .erase()
- .map_err(|e| WasmError::ErasingFailed(e.to_string()))?;
- for (offset, contents) in &self.data_segments {
- memory
- .set(*offset, contents)
- .map_err(|_| WasmError::ApplySnapshotFailed)?;
- }
-
- // Second, restore the values of mutable globals.
+ fn apply(&self, instance: &ModuleRef) -> Result<(), WasmError> {
for (global_ref, global_val) in instance
.globals()
.iter()
.filter(|g| g.is_mutable())
.zip(self.global_mut_values.iter())
- {
- // the instance should be the same as used for preserving and
- // we iterate the same way it as we do it for preserving values that means that the
- // types should be the same and all the values are mutable. So no error is expected/
- global_ref
- .set(*global_val)
- .map_err(|_| WasmError::ApplySnapshotFailed)?;
- }
+ {
+ // the instance should be the same as the one used for preserving, and
+ // we iterate over the globals the same way we did when preserving the values, so the
+ // types should match and all the values are mutable. Thus no error is expected.
+ global_ref
+ .set(*global_val)
+ .map_err(|_| WasmError::ApplySnapshotFailed)?;
+ }
Ok(())
}
}
@@ -634,8 +585,9 @@ pub struct WasmiRuntime {
allow_missing_func_imports: bool,
/// Number of heap pages this runtime uses.
heap_pages: u64,
- /// Data segments created for each new instance.
- data_segments: Vec<DataSegment>,
+
+ global_vals_snapshot: GlobalValsSnapshot,
+ data_segments_snapshot: DataSegmentsSnapshot,
}
impl WasmModule for WasmiRuntime {
@@ -648,19 +600,11 @@ impl WasmModule for WasmiRuntime {
self.allow_missing_func_imports,
).map_err(|e| WasmError::Instantiation(e.to_string()))?;
- // Take state snapshot before executing anything.
- let state_snapshot = StateSnapshot::take(&instance, self.data_segments.clone())
- .expect(
- "`take` returns `Err` if the module is not valid;
- we already loaded module above, thus the `Module` is proven to be valid at this point;
- qed
- ",
- );
-
Ok(Box::new(WasmiInstance {
instance,
memory,
- state_snapshot,
+ global_vals_snapshot: self.global_vals_snapshot.clone(),
+ data_segments_snapshot: self.data_segments_snapshot.clone(),
host_functions: self.host_functions.clone(),
allow_missing_func_imports: self.allow_missing_func_imports,
missing_functions,
@@ -682,10 +626,29 @@ pub fn create_runtime(
//
// A return of this error actually indicates that there is a problem in logic, since
// we just loaded and validated the `module` above.
- let data_segments = extract_data_segments(&code)?;
+ let (data_segments_snapshot, global_vals_snapshot) = {
+ let (instance, _, _) = instantiate_module(
+ heap_pages as usize,
+ &module,
+ &host_functions,
+ allow_missing_func_imports,
+ )
+ .map_err(|e| WasmError::Instantiation(e.to_string()))?;
+
+ let data_segments_snapshot = DataSegmentsSnapshot::take(
+ &WasmModuleInfo::new(code)
+ .ok_or_else(|| WasmError::Other("cannot deserialize module".to_string()))?,
+ )
+ .map_err(|e| WasmError::Other(e.to_string()))?;
+ let global_vals_snapshot = GlobalValsSnapshot::take(&instance);
+
+ (data_segments_snapshot, global_vals_snapshot)
+ };
+
Ok(WasmiRuntime {
module,
- data_segments,
+ data_segments_snapshot,
+ global_vals_snapshot,
host_functions: Arc::new(host_functions),
allow_missing_func_imports,
heap_pages,
@@ -698,12 +661,14 @@ pub struct WasmiInstance {
instance: ModuleRef,
/// The memory instance used by the wasm module.
memory: MemoryRef,
- /// The snapshot of the instance's state taken just after the instantiation.
- state_snapshot: StateSnapshot,
+ /// The snapshot of global variable values just after instantiation.
+ global_vals_snapshot: GlobalValsSnapshot,
+ /// The snapshot of data segments.
+ data_segments_snapshot: DataSegmentsSnapshot,
/// The host functions registered for this instance.
host_functions: Arc<Vec<&'static dyn Function>>,
/// Enable stub generation for functions that are not available in `host_functions`.
- /// These stubs will error when the wasm blob tries to call them.
+ /// These stubs will error when the wasm blob tries to call them.
allow_missing_func_imports: bool,
/// List of missing functions detected during function resolution
missing_functions: Vec<String>,
@@ -713,19 +678,26 @@ pub struct WasmiInstance {
unsafe impl Send for WasmiInstance {}
impl WasmInstance for WasmiInstance {
- fn call(
- &self,
- method: &str,
- data: &[u8],
- ) -> Result<Vec<u8>, Error> {
- self.state_snapshot.apply(&self.instance, &self.memory)
- .map_err(|e| {
- // Snapshot restoration failed. This is pretty unexpected since this can happen
- // if some invariant is broken or if the system is under extreme memory pressure
- // (so erasing fails).
- error!(target: "wasm-executor", "snapshot restoration failed: {}", e);
- e
- })?;
+ fn call(&self, method: &str, data: &[u8]) -> Result<Vec<u8>, Error> {
+ // We reuse a single wasm instance for multiple calls and a previous call (if any)
+ // altered the state.
Therefore, we need to restore the instance to original state. + + // First, zero initialize the linear memory. + self.memory.erase().map_err(|e| { + // Snapshot restoration failed. This is pretty unexpected since this can happen + // if some invariant is broken or if the system is under extreme memory pressure + // (so erasing fails). + error!(target: "wasm-executor", "snapshot restoration failed: {}", e); + WasmError::ErasingFailed(e.to_string()) + })?; + + // Second, reapply data segments into the linear memory. + self.data_segments_snapshot + .apply(|offset, contents| self.memory.set(offset, contents))?; + + // Third, restore the global variables to their initial values. + self.global_vals_snapshot.apply(&self.instance)?; + call_in_wasm_module( &self.instance, &self.memory, @@ -750,18 +722,3 @@ impl WasmInstance for WasmiInstance { } } } - -/// Extract the data segments from the given wasm code. -/// -/// Returns `Err` if the given wasm code cannot be deserialized. -fn extract_data_segments(wasm_code: &[u8]) -> Result, WasmError> { - let raw_module: RawModule = deserialize_buffer(wasm_code) - .map_err(|_| WasmError::CantDeserializeWasm)?; - - let segments = raw_module - .data_section() - .map(|ds| ds.entries()) - .unwrap_or(&[]) - .to_vec(); - Ok(segments) -} diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index 9f8784cc981b7d1a79438846255d17afcf27d9d3..fcedf20b7a71612e4b6bc78f3e05aa42dfd2a41b 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-executor-wasmtime" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,13 +12,20 @@ description = "Defines a `WasmRuntime` that uses the Wasmtime JIT to execute." 
log = "0.4.8" scoped-tls = "1.0" parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.2.0" } -sc-executor-common = { version = "0.8.0-alpha.2", path = "../common" } -sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0-alpha.2", path = "../../../primitives/allocator" } -wasmtime = { git = "https://github.com/paritytech/wasmtime", branch = "a-thread-safe-api" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +sc-executor-common = { version = "0.8.0-alpha.5", path = "../common" } +sp-wasm-interface = { version = "2.0.0-alpha.5", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-alpha.5", path = "../../../primitives/allocator" } +wasmtime = { package = "substrate-wasmtime", version = "0.13.0-threadsafe.1" } +wasmtime_runtime = { package = "substrate-wasmtime-runtime", version = "0.13.0-threadsafe.1" } +wasmtime-environ = "0.12.0" +cranelift-wasm = "0.59.0" +cranelift-codegen = "0.59.0" [dev-dependencies] assert_matches = "1.3.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 159746801a52aa68e45c63b4f060a1804415a2f8..469668802f18649c80b59715783a3ad784970396 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -20,11 +20,55 @@ use crate::util; use crate::imports::Imports; -use sc_executor_common::error::{Error, Result}; +use std::{slice, marker}; +use sc_executor_common::{ + error::{Error, Result}, + util::{WasmModuleInfo, DataSegmentsSnapshot}, +}; use sp_wasm_interface::{Pointer, WordSize, Value}; -use std::slice; -use std::marker; -use wasmtime::{Instance, Module, Memory, Table, Val}; +use wasmtime::{Store, Instance, Module, Memory, Table, Val}; + +mod globals_snapshot; + +pub use globals_snapshot::GlobalsSnapshot; + +pub struct ModuleWrapper { + imported_globals_count: u32, + globals_count: u32, + module: Module, + data_segments_snapshot: DataSegmentsSnapshot, +} + +impl ModuleWrapper { + pub fn new(store: &Store, code: &[u8]) -> Result { + let module = Module::new(&store, code) + .map_err(|e| Error::from(format!("cannot create module: {}", e)))?; + + let module_info = WasmModuleInfo::new(code) + .ok_or_else(|| Error::from("cannot deserialize module".to_string()))?; + let declared_globals_count = module_info.declared_globals_count(); + let imported_globals_count = module_info.imported_globals_count(); + let globals_count = imported_globals_count + declared_globals_count; + + let data_segments_snapshot = DataSegmentsSnapshot::take(&module_info) + .map_err(|e| Error::from(format!("cannot take data segments snapshot: {}", e)))?; + + Ok(Self { + module, + imported_globals_count, + globals_count, + data_segments_snapshot, + }) + } + + pub fn module(&self) -> &Module { + &self.module + } + + pub fn data_segments_snapshot(&self) -> &DataSegmentsSnapshot { + &self.data_segments_snapshot + } +} /// Wrap the given WebAssembly Instance of a wasm module with Substrate-runtime. 
///
@@ -32,6 +76,8 @@ use wasmtime::{Instance, Module, Memory, Table, Val};
/// routines.
pub struct InstanceWrapper {
instance: Instance,
+ globals_count: u32,
+ imported_globals_count: u32,
// The memory instance of the `instance`.
//
// It is important to make sure that we don't make any copies of this to make it easier to prove
@@ -44,8 +90,8 @@ pub struct InstanceWrapper {
impl InstanceWrapper {
/// Create a new instance wrapper from the given wasm module.
- pub fn new(module: &Module, imports: &Imports, heap_pages: u32) -> Result<Self> {
- let instance = Instance::new(module, &imports.externs)
+ pub fn new(module_wrapper: &ModuleWrapper, imports: &Imports, heap_pages: u32) -> Result<Self> {
+ let instance = Instance::new(&module_wrapper.module, &imports.externs)
.map_err(|e| Error::from(format!("cannot instantiate: {}", e)))?;
let memory = match imports.memory_import_index {
@@ -66,8 +112,10 @@ impl InstanceWrapper {
Ok(Self {
table: get_table(&instance),
- memory,
instance,
+ globals_count: module_wrapper.globals_count,
+ imported_globals_count: module_wrapper.imported_globals_count,
+ memory,
_not_send_nor_sync: marker::PhantomData,
})
}
diff --git a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs
new file mode 100644
index 0000000000000000000000000000000000000000..a6ab3fed604c43c8569703aba4c79a4484775383
--- /dev/null
+++ b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs
@@ -0,0 +1,130 @@
+// Copyright 2020 Parity Technologies (UK) Ltd.
+// This file is part of Substrate.
+
+// Substrate is free software: you can redistribute it and/or modify
+// it under the terms of the GNU General Public License as published by
+// the Free Software Foundation, either version 3 of the License, or
+// (at your option) any later version.
+
+// Substrate is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU General Public License for more details.
+
+// You should have received a copy of the GNU General Public License
+// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
+
+use super::InstanceWrapper;
+use sc_executor_common::{
+ error::{Error, Result},
+};
+use sp_wasm_interface::Value;
+use cranelift_codegen::ir;
+use cranelift_wasm::GlobalIndex;
+
+/// A snapshot of the values of a module's global variables. This snapshot can be used later for
+/// restoring the values to the preserved state.
+///
+/// Technically, a snapshot stores only values of mutable global variables. This is because
+/// immutable global variables always have the same values.
+pub struct GlobalsSnapshot {
+ handle: wasmtime_runtime::InstanceHandle,
+ preserved_mut_globals: Vec<(*mut wasmtime_runtime::VMGlobalDefinition, Value)>,
+}
+
+impl GlobalsSnapshot {
+ /// Take a snapshot of global variables for a given instance.
+ pub fn take(instance_wrapper: &InstanceWrapper) -> Result<Self> {
+ // EVIL:
+ // Usage of an undocumented function.
+ let handle = instance_wrapper.instance.handle().clone();
+
+ let mut preserved_mut_globals = vec![];
+
+ for global_idx in instance_wrapper.imported_globals_count..instance_wrapper.globals_count {
+ let (def, global) = match handle.lookup_by_declaration(
+ &wasmtime_environ::Export::Global(GlobalIndex::from_u32(global_idx)),
+ ) {
+ wasmtime_runtime::Export::Global {
+ definition, global, ..
+ } => (definition, global), + _ => unreachable!("only globals can be returned for a global request"), + }; + + // skip immutable globals. + if !global.mutability { + continue; + } + + let value = unsafe { + // Safety of this function solely depends on the correctness of the reference and + // the type information of the global. + read_global(def, global.ty)? + }; + preserved_mut_globals.push((def, value)); + } + + Ok(Self { + preserved_mut_globals, + handle, + }) + } + + /// Apply the snapshot to the given instance. + /// + /// This instance must be the same that was used for creation of this snapshot. + pub fn apply(&self, instance_wrapper: &InstanceWrapper) -> Result<()> { + if instance_wrapper.instance.handle() != &self.handle { + return Err(Error::from("unexpected instance handle".to_string())); + } + + for (def, value) in &self.preserved_mut_globals { + unsafe { + // The following writes are safe if the precondition that this is the same instance + // this snapshot was created with: + // + // 1. These pointers must be still not-NULL and allocated. + // 2. The set of global variables is fixed for the lifetime of the same instance. + // 3. We obviously assume that the wasmtime references are correct in the first place. + // 4. We write the data with the same type it was read in the first place. + write_global(*def, *value)?; + } + } + Ok(()) + } +} + +unsafe fn read_global( + def: *const wasmtime_runtime::VMGlobalDefinition, + ty: ir::Type, +) -> Result { + let def = def + .as_ref() + .ok_or_else(|| Error::from("wasmtime global reference is null during read".to_string()))?; + let val = match ty { + ir::types::I32 => Value::I32(*def.as_i32()), + ir::types::I64 => Value::I64(*def.as_i64()), + ir::types::F32 => Value::F32(*def.as_u32()), + ir::types::F64 => Value::F64(*def.as_u64()), + _ => { + return Err(Error::from(format!( + "unsupported global variable type: {}", + ty + ))) + } + }; + Ok(val) +} + +unsafe fn write_global(def: *mut wasmtime_runtime::VMGlobalDefinition, value: Value) -> Result<()> { + let def = def + .as_mut() + .ok_or_else(|| Error::from("wasmtime global reference is null during write".to_string()))?; + match value { + Value::I32(v) => *def.as_i32_mut() = v, + Value::I64(v) => *def.as_i64_mut() = v, + Value::F32(v) => *def.as_u32_mut() = v, + Value::F64(v) => *def.as_u64_mut() = v, + } + Ok(()) +} diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 02acd33e69a628705675105e8644bad177bfc15a..0289188ba11fce00d104d21268541167dd68f54a 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -15,14 +15,14 @@ // along with Substrate. If not, see . //! Defines the compiled Wasm runtime that uses Wasmtime internally. 
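+//!
+//! Note on instance reuse: a single wasm instance is kept per `WasmtimeInstance` and reset before
+//! every call instead of being re-created. In rough outline (`module_wrapper`, `instance_wrapper`
+//! and `globals_snapshot` as used in `WasmtimeInstance::call` below):
+//!
+//! ```ignore
+//! // Re-initialize linear memory from the data segments recorded at module creation ...
+//! module_wrapper.data_segments_snapshot().apply(|offset, contents| {
+//!     instance_wrapper.write_memory_from(Pointer::new(offset), contents)
+//! })?;
+//! // ... and roll mutable globals back to their values right after instantiation.
+//! globals_snapshot.apply(&instance_wrapper)?;
+//! ```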
-use std::rc::Rc;
-use std::sync::Arc;
use crate::host::HostState;
use crate::imports::{Imports, resolve_imports};
-use crate::instance_wrapper::InstanceWrapper;
+use crate::instance_wrapper::{ModuleWrapper, InstanceWrapper, GlobalsSnapshot};
use crate::state_holder;
+use std::rc::Rc;
+use std::sync::Arc;
use sc_executor_common::{
error::{Error, Result, WasmError},
wasm_runtime::{WasmModule, WasmInstance},
@@ -30,12 +30,12 @@ use sc_executor_common::{
use sp_allocator::FreeingBumpHeapAllocator;
use sp_runtime_interface::unpack_ptr_and_len;
use sp_wasm_interface::{Function, Pointer, WordSize, Value};
-use wasmtime::{Config, Engine, Module, Store};
+use wasmtime::{Config, Engine, Store};
/// A `WasmModule` implementation using wasmtime to compile the runtime module to machine code
/// and execute the compiled code.
pub struct WasmtimeRuntime {
- module: Arc<Module>,
+ module_wrapper: Arc<ModuleWrapper>,
heap_pages: u32,
allow_missing_func_imports: bool,
host_functions: Vec<&'static dyn Function>,
@@ -46,16 +46,24 @@ impl WasmModule for WasmtimeRuntime {
// Scan all imports, find the matching host functions, and create stubs that adapt arguments
// and results.
let imports = resolve_imports(
- &self.module,
+ self.module_wrapper.module(),
&self.host_functions,
self.heap_pages,
self.allow_missing_func_imports,
)?;
+ let instance_wrapper =
+ InstanceWrapper::new(&self.module_wrapper, &imports, self.heap_pages)?;
+ let heap_base = instance_wrapper.extract_heap_base()?;
+ let globals_snapshot = GlobalsSnapshot::take(&instance_wrapper)?;
+
Ok(Box::new(WasmtimeInstance {
- module: self.module.clone(),
+ instance_wrapper: Rc::new(instance_wrapper),
+ module_wrapper: Arc::clone(&self.module_wrapper),
imports,
+ globals_snapshot,
heap_pages: self.heap_pages,
+ heap_base,
}))
}
}
@@ -63,9 +71,12 @@ impl WasmModule for WasmtimeRuntime {
/// A `WasmInstance` implementation that reuses compiled module and spawns instances
/// to execute the compiled code.
pub struct WasmtimeInstance {
- module: Arc<Module>,
+ module_wrapper: Arc<ModuleWrapper>,
+ instance_wrapper: Rc<InstanceWrapper>,
+ globals_snapshot: GlobalsSnapshot,
imports: Imports,
heap_pages: u32,
+ heap_base: u32,
}
// This is safe because `WasmtimeInstance` does not leak reference to `self.imports`
@@ -74,23 +85,32 @@ unsafe impl Send for WasmtimeInstance {}
impl WasmInstance for WasmtimeInstance {
fn call(&self, method: &str, data: &[u8]) -> Result<Vec<u8>> {
- // TODO: reuse the instance and reset globals after call
- // https://github.com/paritytech/substrate/issues/5141
- let instance = Rc::new(InstanceWrapper::new(&self.module, &self.imports, self.heap_pages)?);
- call_method(
- instance,
- method,
+ let entrypoint = self.instance_wrapper.resolve_entrypoint(method)?;
+ let allocator = FreeingBumpHeapAllocator::new(self.heap_base);
+
+ self.module_wrapper
+ .data_segments_snapshot()
+ .apply(|offset, contents| {
+ self.instance_wrapper
+ .write_memory_from(Pointer::new(offset), contents)
+ })?;
+
+ self.globals_snapshot.apply(&*self.instance_wrapper)?;
+
+ perform_call(
data,
+ Rc::clone(&self.instance_wrapper),
+ entrypoint,
+ allocator,
)
}
fn get_global_const(&self, name: &str) -> Result<Option<Value>> {
- let instance = InstanceWrapper::new(&self.module, &self.imports, self.heap_pages)?;
+ let instance = InstanceWrapper::new(&self.module_wrapper, &self.imports, self.heap_pages)?;
instance.get_global_val(name)
}
}
-
/// Create a new `WasmtimeRuntime` given the code. This function performs translation from Wasm to
/// machine code, which can be computationally heavy.
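+///
+/// A rough usage sketch; `new_instance` and `call` come from the `WasmModule`/`WasmInstance`
+/// traits in `sc-executor-common`, and the argument names follow the function body below
+/// (the exact parameter list is outside this hunk):
+///
+/// ```ignore
+/// let runtime = create_runtime(code, heap_pages, host_functions, allow_missing_func_imports)?;
+/// let instance = runtime.new_instance()?;
+/// let output = instance.call(method, data)?;
+/// ```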
pub fn create_runtime( @@ -105,30 +125,18 @@ pub fn create_runtime( let engine = Engine::new(&config); let store = Store::new(&engine); - let module = Module::new(&store, code) + + let module_wrapper = ModuleWrapper::new(&store, code) .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; Ok(WasmtimeRuntime { - module: Arc::new(module), + module_wrapper: Arc::new(module_wrapper), heap_pages: heap_pages as u32, allow_missing_func_imports, host_functions, }) } -/// Call a function inside a precompiled Wasm module. -fn call_method( - instance_wrapper: Rc, - method: &str, - data: &[u8], -) -> Result> { - let entrypoint = instance_wrapper.resolve_entrypoint(method)?; - let heap_base = instance_wrapper.extract_heap_base()?; - let allocator = FreeingBumpHeapAllocator::new(heap_base); - - perform_call(data, instance_wrapper, entrypoint, allocator) -} - fn perform_call( data: &[u8], instance_wrapper: Rc, diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index 6cee0cd8526f6dcbd2ed9019b172dc30b009ca05..b684c814d1b93607534c2b73fdc74aa4a99eda8b 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-finality-grandpa" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,44 +11,48 @@ documentation = "https://docs.rs/sc-finality-grandpa" [dependencies] -fork-tree = { version = "2.0.0-alpha.2", path = "../../utils/fork-tree" } -futures = "0.3.1" +fork-tree = { version = "2.0.0-alpha.5", path = "../../utils/fork-tree" } +futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" parking_lot = "0.10.0" rand = "0.7.2" assert_matches = "1.3.0" -parity-scale-codec = { version = "1.2.0", features = ["derive"] } -sp-arithmetic = { version = "2.0.0-alpha.2", path = "../../primitives/arithmetic" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sp-consensus = { version = "0.8.0-alpha.1", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } -sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } +parity-scale-codec = { version = "1.3.0", features = ["derive"] } +sp-arithmetic = { version = "2.0.0-alpha.5", path = "../../primitives/arithmetic" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/common" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../telemetry" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../keystore" } serde_json = "1.0.41" -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sc-client = { version = "0.8.0-alpha.2", path = "../" } -sp-inherents = { version = "2.0.0-alpha.2", path = "../../primitives/inherents" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -sc-network = { version = "0.8.0-alpha.2", path = "../network" } -sc-network-gossip = { version = "0.8.0-alpha.2", path = "../network-gossip" } -sp-finality-tracker = { version = "2.0.0-alpha.2", path = 
"../../primitives/finality-tracker" } -sp-finality-grandpa = { version = "2.0.0-alpha.2", path = "../../primitives/finality-grandpa" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-alpha.2" } -sc-block-builder = { version = "0.8.0-alpha.2", path = "../block-builder" } -finality-grandpa = { version = "0.11.1", features = ["derive-codec"] } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sc-client = { version = "0.8.0-alpha.5", path = "../" } +sp-inherents = { version = "2.0.0-alpha.5", path = "../../primitives/inherents" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } +sc-network-gossip = { version = "0.8.0-alpha.5", path = "../network-gossip" } +sp-finality-tracker = { version = "2.0.0-alpha.5", path = "../../primitives/finality-tracker" } +sp-finality-grandpa = { version = "2.0.0-alpha.5", path = "../../primitives/finality-grandpa" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-alpha.5"} +sc-block-builder = { version = "0.8.0-alpha.5", path = "../block-builder" } +finality-grandpa = { version = "0.11.2", features = ["derive-codec"] } pin-project = "0.4.6" [dev-dependencies] -finality-grandpa = { version = "0.11.1", features = ["derive-codec", "test-helpers"] } -sc-network = { version = "0.8.0-alpha.2", path = "../network" } +finality-grandpa = { version = "0.11.2", features = ["derive-codec", "test-helpers"] } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } sc-network-test = { version = "0.8.0-dev", path = "../network/test" } -sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../primitives/keyring" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/babe" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } +sp-consensus-babe = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/babe" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } env_logger = "0.7.0" tokio = { version = "0.2", features = ["rt-core"] } tempfile = "3.1.0" -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index 5e295d0baee7ab86548a1e6878090d85d885efad..fe3f2dd19eb1ce5764508fe5ff86e5919d35a2e7 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -20,7 +20,7 @@ use fork_tree::ForkTree; use parking_lot::RwLock; use finality_grandpa::voter_set::VoterSet; use parity_scale_codec::{Encode, Decode}; -use log::{debug, info}; +use log::debug; use sc_telemetry::{telemetry, CONSENSUS_INFO}; use sp_finality_grandpa::{AuthorityId, AuthorityList}; @@ -250,6 +250,7 @@ where best_hash: H, best_number: N, is_descendent_of: &F, + initial_sync: bool, ) -> Result, E> where F: Fn(&H, &H) -> Result, { @@ -262,8 +263,10 @@ where // check if the given best block is in the same branch as the block that signaled the change. 
if is_descendent_of(&change.canon_hash, &best_hash)? { // apply this change: make the set canonical - info!(target: "afg", "Applying authority set change forced at block #{:?}", - change.canon_height); + afg_log!(initial_sync, + "👴 Applying authority set change forced at block #{:?}", + change.canon_height, + ); telemetry!(CONSENSUS_INFO; "afg.applying_forced_authority_set_change"; "block" => ?change.canon_height ); @@ -305,6 +308,7 @@ where finalized_hash: H, finalized_number: N, is_descendent_of: &F, + initial_sync: bool, ) -> Result, fork_tree::Error> where F: Fn(&H, &H) -> Result, E: std::error::Error, @@ -328,8 +332,10 @@ where self.pending_forced_changes.clear(); if let Some(change) = change { - info!(target: "afg", "Applying authority set change scheduled at block #{:?}", - change.canon_height); + afg_log!(initial_sync, + "👴 Applying authority set change scheduled at block #{:?}", + change.canon_height, + ); telemetry!(CONSENSUS_INFO; "afg.applying_scheduled_authority_set_change"; "block" => ?change.canon_height ); @@ -599,11 +605,16 @@ mod tests { ); // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b" - let status = authorities.apply_standard_changes("hash_c", 11, &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_c") => true, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - })).unwrap(); + let status = authorities.apply_standard_changes( + "hash_c", + 11, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + false, + ).unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, None); @@ -613,10 +624,15 @@ mod tests { ); // finalizing "hash_d" will enact the change signaled at "hash_a" - let status = authorities.apply_standard_changes("hash_d", 15, &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_d") => true, - _ => unreachable!(), - })).unwrap(); + let status = authorities.apply_standard_changes( + "hash_d", + 15, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_d") => true, + _ => unreachable!(), + }), + false, + ).unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_d", 15))); @@ -671,12 +687,18 @@ mod tests { }); // trying to finalize past `change_c` without finalizing `change_a` first - match authorities.apply_standard_changes("hash_d", 40, &is_descendent_of) { - Err(fork_tree::Error::UnfinalizedAncestor) => {}, - _ => unreachable!(), - } + assert!(matches!( + authorities.apply_standard_changes("hash_d", 40, &is_descendent_of, false), + Err(fork_tree::Error::UnfinalizedAncestor) + )); + + let status = authorities.apply_standard_changes( + "hash_b", + 15, + &is_descendent_of, + false, + ).unwrap(); - let status = authorities.apply_standard_changes("hash_b", 15, &is_descendent_of).unwrap(); assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_b", 15))); @@ -684,7 +706,13 @@ mod tests { assert_eq!(authorities.set_id, 1); // after finalizing `change_a` it should be possible to finalize `change_c` - let status = authorities.apply_standard_changes("hash_d", 40, &is_descendent_of).unwrap(); + let status = authorities.apply_standard_changes( + "hash_d", + 40, + &is_descendent_of, + false, + ).unwrap(); + assert!(status.changed); assert_eq!(status.new_set_block, Some(("hash_d", 40))); @@ -817,20 +845,30 @@ mod tests { assert!(authorities.add_pending_change(change_c, &is_descendent_of_a).is_err()); // too early. 
- assert!(authorities.apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true)).unwrap().is_none()); + assert!( + authorities.apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true), false) + .unwrap() + .is_none() + ); // too late. - assert!(authorities.apply_forced_changes("hash_a16", 16, &static_is_descendent_of(true)).unwrap().is_none()); + assert!( + authorities.apply_forced_changes("hash_a16", 16, &static_is_descendent_of(true), false) + .unwrap() + .is_none() + ); // on time -- chooses the right change. assert_eq!( - authorities.apply_forced_changes("hash_a15", 15, &is_descendent_of_a).unwrap().unwrap(), + authorities.apply_forced_changes("hash_a15", 15, &is_descendent_of_a, false) + .unwrap() + .unwrap(), (42, AuthoritySet { current_authorities: set_a, set_id: 1, pending_standard_changes: ForkTree::new(), pending_forced_changes: Vec::new(), - }) + }), ); } } diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index 525a4a99bab5861e57b6fc25b49bbcaccaea7220..fe652f52fe2067e728263efe0acd302d2673be2b 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -330,7 +330,7 @@ pub(crate) fn load_persistent( } // genesis. - info!(target: "afg", "Loading GRANDPA authority set \ + info!(target: "afg", "👴 Loading GRANDPA authority set \ from genesis on what appears to be first startup."); let genesis_authorities = genesis_authorities()?; diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 89c507f39af2e52e7fc8766ddd86b2019d74b85f..2d39ed7ec433fa2fc9e4f931e1916e922328137d 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -84,13 +84,13 @@ use sp_runtime::traits::{NumberFor, Block as BlockT, Zero}; use sc_network_gossip::{MessageIntent, ValidatorContext}; -use sc_network::{config::Roles, PeerId, ReputationChange}; +use sc_network::{ObservedRole, PeerId, ReputationChange}; use parity_scale_codec::{Encode, Decode}; use sp_finality_grandpa::AuthorityId; use sc_telemetry::{telemetry, CONSENSUS_DEBUG}; use log::{trace, debug}; -use futures::channel::mpsc; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use prometheus_endpoint::{CounterVec, Opts, PrometheusError, register, Registry, U64}; use rand::seq::SliceRandom; @@ -439,11 +439,11 @@ impl Misbehavior { struct PeerInfo { view: View, - roles: Roles, + roles: ObservedRole, } impl PeerInfo { - fn new(roles: Roles) -> Self { + fn new(roles: ObservedRole) -> Self { PeerInfo { view: View::default(), roles, @@ -469,14 +469,17 @@ impl Default for Peers { } impl Peers { - fn new_peer(&mut self, who: PeerId, roles: Roles) { - if roles.is_authority() && self.lucky_authorities.len() < MIN_LUCKY { - self.lucky_authorities.insert(who.clone()); - } - if !roles.is_authority() && self.lucky_peers.len() < MIN_LUCKY { - self.lucky_peers.insert(who.clone()); + fn new_peer(&mut self, who: PeerId, role: ObservedRole) { + match role { + ObservedRole::Authority if self.lucky_authorities.len() < MIN_LUCKY => { + self.lucky_authorities.insert(who.clone()); + }, + ObservedRole::Full | ObservedRole::Light if self.lucky_peers.len() < MIN_LUCKY => { + self.lucky_peers.insert(who.clone()); + }, + _ => {} } - self.inner.insert(who, PeerInfo::new(roles)); + self.inner.insert(who, PeerInfo::new(role)); } fn peer_disconnected(&mut self, who: &PeerId) { @@ -539,21 +542,28 @@ impl 
Peers { } fn authorities(&self) -> usize { - self.inner.iter().filter(|(_, info)| info.roles.is_authority()).count() + // Note that our sentry and our validator are neither authorities nor non-authorities. + self.inner.iter().filter(|(_, info)| matches!(info.roles, ObservedRole::Authority)).count() } fn non_authorities(&self) -> usize { - self.inner.iter().filter(|(_, info)| !info.roles.is_authority()).count() + // Note that our sentry and our validator are neither authorities nor non-authorities. + self.inner + .iter() + .filter(|(_, info)| matches!(info.roles, ObservedRole::Full | ObservedRole::Light)) + .count() } fn reshuffle(&mut self) { let mut lucky_peers: Vec<_> = self.inner .iter() - .filter_map(|(id, info)| if !info.roles.is_authority() { Some(id.clone()) } else { None }) + .filter_map(|(id, info)| + if matches!(info.roles, ObservedRole::Full | ObservedRole::Light) { Some(id.clone()) } else { None }) .collect(); let mut lucky_authorities: Vec<_> = self.inner .iter() - .filter_map(|(id, info)| if info.roles.is_authority() { Some(id.clone()) } else { None }) + .filter_map(|(id, info)| + if matches!(info.roles, ObservedRole::Authority) { Some(id.clone()) } else { None }) .collect(); let num_non_authorities = ((lucky_peers.len() as f32).sqrt() as usize) @@ -633,8 +643,11 @@ impl CatchUpConfig { fn request_allowed(&self, peer: &PeerInfo) -> bool { match self { CatchUpConfig::Disabled => false, - CatchUpConfig::Enabled { only_from_authorities, .. } => - !only_from_authorities || peer.roles.is_authority(), + CatchUpConfig::Enabled { only_from_authorities, .. } => match peer.roles { + ObservedRole::Authority | ObservedRole::OurSentry | + ObservedRole::OurGuardedAuthority => true, + _ => !only_from_authorities + } } } } @@ -722,7 +735,15 @@ impl Inner { last_commit: None, }), Some(ref mut v) => if v.set_id == set_id { - return None + if self.authorities != authorities { + debug!(target: "afg", + "Gossip validator noted set {:?} twice with different authorities. \ + Was the authority set hard forked?", + set_id, + ); + self.authorities = authorities; + } + return None; } else { v }, @@ -788,6 +809,7 @@ impl Inner { // ensure authority is part of the set. if !self.authorities.contains(&full.message.id) { + debug!(target: "afg", "Message from unknown voter: {}", full.message.id); telemetry!(CONSENSUS_DEBUG; "afg.bad_msg_signature"; "signature" => ?full.message.id); return Action::Discard(cost::UNKNOWN_VOTER); } @@ -1112,34 +1134,38 @@ impl Inner { return false; } - if peer.roles.is_authority() { - let authorities = self.peers.authorities(); + match peer.roles { + ObservedRole::OurGuardedAuthority | ObservedRole::OurSentry => true, + ObservedRole::Authority => { + let authorities = self.peers.authorities(); - // the target node is an authority, on the first round duration we start by - // sending the message to only `sqrt(authorities)` (if we're - // connected to at least `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES - && authorities > MIN_LUCKY - { - self.peers.lucky_authorities.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // authorities for whom it is polite to do so - true - } - } else { - // the node is not an authority so we apply stricter filters - if round_elapsed >= round_duration * PROPAGATION_ALL { - // if we waited for 3 (or more) rounds - // then it is allowed to be sent to all peers. 
- true - } else if round_elapsed >= round_duration * PROPAGATION_SOME_NON_AUTHORITIES { - // otherwise we only send it to `sqrt(non-authorities)`. - self.peers.lucky_peers.contains(who) - } else { - false - } + // the target node is an authority, on the first round duration we start by + // sending the message to only `sqrt(authorities)` (if we're + // connected to at least `MIN_LUCKY`). + if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES + && authorities > MIN_LUCKY + { + self.peers.lucky_authorities.contains(who) + } else { + // otherwise we already went through the step above, so + // we won't filter the message and send it to all + // authorities for whom it is polite to do so + true + } + }, + ObservedRole::Full | ObservedRole::Light => { + // the node is not an authority so we apply stricter filters + if round_elapsed >= round_duration * PROPAGATION_ALL { + // if we waited for 3 (or more) rounds + // then it is allowed to be sent to all peers. + true + } else if round_elapsed >= round_duration * PROPAGATION_SOME_NON_AUTHORITIES { + // otherwise we only send it to `sqrt(non-authorities)`. + self.peers.lucky_peers.contains(who) + } else { + false + } + }, } } @@ -1161,38 +1187,42 @@ impl Inner { let round_duration = self.config.gossip_duration * ROUND_DURATION; let round_elapsed = self.round_start.elapsed(); - if peer.roles.is_authority() { - let authorities = self.peers.authorities(); - - // the target node is an authority, on the first round duration we start by - // sending the message to only `sqrt(authorities)` (if we're - // connected to at least `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES - && authorities > MIN_LUCKY - { - self.peers.lucky_authorities.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // authorities for whom it is polite to do so - true - } - } else { - let non_authorities = self.peers.non_authorities(); - - // the target node is not an authority, on the first and second - // round duration we start by sending the message to only - // `sqrt(non_authorities)` (if we're connected to at least - // `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_SOME_NON_AUTHORITIES - && non_authorities > MIN_LUCKY - { - self.peers.lucky_peers.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // non-authorities for whom it is polite to do so - true + match peer.roles { + ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority => true, + ObservedRole::Authority => { + let authorities = self.peers.authorities(); + + // the target node is an authority, on the first round duration we start by + // sending the message to only `sqrt(authorities)` (if we're + // connected to at least `MIN_LUCKY`). + if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES + && authorities > MIN_LUCKY + { + self.peers.lucky_authorities.contains(who) + } else { + // otherwise we already went through the step above, so + // we won't filter the message and send it to all + // authorities for whom it is polite to do so + true + } + }, + ObservedRole::Full | ObservedRole::Light => { + let non_authorities = self.peers.non_authorities(); + + // the target node is not an authority, on the first and second + // round duration we start by sending the message to only + // `sqrt(non_authorities)` (if we're connected to at least + // `MIN_LUCKY`). 
+ if round_elapsed < round_duration * PROPAGATION_SOME_NON_AUTHORITIES + && non_authorities > MIN_LUCKY + { + self.peers.lucky_peers.contains(who) + } else { + // otherwise we already went through the step above, so + // we won't filter the message and send it to all + // non-authorities for whom it is polite to do so + true + } } } } @@ -1224,7 +1254,7 @@ impl Metrics { pub(super) struct GossipValidator { inner: parking_lot::RwLock>, set_state: environment::SharedVoterSetState, - report_sender: mpsc::UnboundedSender, + report_sender: TracingUnboundedSender, metrics: Option, } @@ -1236,7 +1266,7 @@ impl GossipValidator { config: crate::Config, set_state: environment::SharedVoterSetState, prometheus_registry: Option<&Registry>, - ) -> (GossipValidator, mpsc::UnboundedReceiver) { + ) -> (GossipValidator, TracingUnboundedReceiver) { let metrics = match prometheus_registry.map(Metrics::register) { Some(Ok(metrics)) => Some(metrics), Some(Err(e)) => { @@ -1246,7 +1276,7 @@ impl GossipValidator { None => None, }; - let (tx, rx) = mpsc::unbounded(); + let (tx, rx) = tracing_unbounded("mpsc_grandpa_gossip_validator"); let val = GossipValidator { inner: parking_lot::RwLock::new(Inner::new(config)), set_state, @@ -1380,10 +1410,15 @@ impl GossipValidator { (action, broadcast_topics, peer_reply) } + + #[cfg(test)] + fn inner(&self) -> &parking_lot::RwLock> { + &self.inner + } } impl sc_network_gossip::Validator for GossipValidator { - fn new_peer(&self, context: &mut dyn ValidatorContext, who: &PeerId, roles: Roles) { + fn new_peer(&self, context: &mut dyn ValidatorContext, who: &PeerId, roles: ObservedRole) { let packet = { let mut inner = self.inner.write(); inner.peers.new_peer(who.clone(), roles); @@ -1643,7 +1678,7 @@ mod tests { assert!(res.unwrap().is_none()); // connect & disconnect. - peers.new_peer(id.clone(), Roles::AUTHORITY); + peers.new_peer(id.clone(), ObservedRole::Authority); peers.peer_disconnected(&id); let res = peers.update_peer_state(&id, update.clone()); @@ -1679,7 +1714,7 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id.clone(), Roles::AUTHORITY); + peers.new_peer(id.clone(), ObservedRole::Authority); let mut check_update = move |update: NeighborPacket<_>| { let view = peers.update_peer_state(&id, update.clone()).unwrap().unwrap(); @@ -1699,7 +1734,7 @@ mod tests { let mut peers = Peers::default(); let id = PeerId::random(); - peers.new_peer(id.clone(), Roles::AUTHORITY); + peers.new_peer(id.clone(), ObservedRole::Authority); peers.update_peer_state(&id, NeighborPacket { round: Round(10), @@ -1900,7 +1935,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let mut inner = val.inner.write(); - inner.peers.new_peer(peer.clone(), Roles::AUTHORITY); + inner.peers.new_peer(peer.clone(), ObservedRole::Authority); let res = inner.handle_catch_up_request( &peer, @@ -1951,7 +1986,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), Roles::AUTHORITY); + val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); let send_request = |set_id, round| { let mut inner = val.inner.write(); @@ -2031,7 +2066,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded. 
let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), Roles::AUTHORITY); + val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); let import_neighbor_message = |set_id, round| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( @@ -2105,7 +2140,7 @@ mod tests { // add the peer making the request to the validator, // otherwise it is discarded. let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), Roles::AUTHORITY); + val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); // importing a neighbor message from a peer in the same set in a later // round should lead to a catch up request but since they're disabled @@ -2141,8 +2176,8 @@ mod tests { let peer_authority = PeerId::random(); let peer_full = PeerId::random(); - val.inner.write().peers.new_peer(peer_authority.clone(), Roles::AUTHORITY); - val.inner.write().peers.new_peer(peer_full.clone(), Roles::FULL); + val.inner.write().peers.new_peer(peer_authority.clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); let import_neighbor_message = |peer| { let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( @@ -2199,7 +2234,7 @@ mod tests { // add the peer making the requests to the validator, otherwise it is // discarded. let peer_full = PeerId::random(); - val.inner.write().peers.new_peer(peer_full.clone(), Roles::FULL); + val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( &peer_full, @@ -2276,8 +2311,8 @@ mod tests { full_nodes.resize_with(30, || PeerId::random()); for i in 0..30 { - val.inner.write().peers.new_peer(authorities[i].clone(), Roles::AUTHORITY); - val.inner.write().peers.new_peer(full_nodes[i].clone(), Roles::FULL); + val.inner.write().peers.new_peer(authorities[i].clone(), ObservedRole::Authority); + val.inner.write().peers.new_peer(full_nodes[i].clone(), ObservedRole::Full); } let test = |num_round, peers| { @@ -2349,7 +2384,7 @@ mod tests { let mut authorities = Vec::new(); for _ in 0..5 { let peer_id = PeerId::random(); - val.inner.write().peers.new_peer(peer_id.clone(), Roles::AUTHORITY); + val.inner.write().peers.new_peer(peer_id.clone(), ObservedRole::Authority); authorities.push(peer_id); } @@ -2389,7 +2424,7 @@ mod tests { let mut authorities = Vec::new(); for _ in 0..100 { let peer_id = PeerId::random(); - val.inner.write().peers.new_peer(peer_id.clone(), Roles::AUTHORITY); + val.inner.write().peers.new_peer(peer_id.clone(), ObservedRole::Authority); authorities.push(peer_id); } @@ -2440,7 +2475,7 @@ mod tests { val.inner .write() .peers - .new_peer(peer1.clone(), Roles::AUTHORITY); + .new_peer(peer1.clone(), ObservedRole::Authority); val.inner .write() @@ -2460,7 +2495,7 @@ mod tests { val.inner .write() .peers - .new_peer(peer2.clone(), Roles::AUTHORITY); + .new_peer(peer2.clone(), ObservedRole::Authority); // create a commit for round 1 of set id 1 // targeting a block at height 2 @@ -2555,4 +2590,19 @@ mod tests { &commit(0, 1, 2), )); } + + #[test] + fn allow_noting_different_authorities_for_same_set() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + let a1 = vec![AuthorityId::default()]; + val.note_set(SetId(1), a1.clone(), |_, _| {}); + + assert_eq!(val.inner().read().authorities, a1); + + let a2 = vec![AuthorityId::default(), AuthorityId::default()]; + val.note_set(SetId(1), a2.clone(), |_, _| {}); 
+ + assert_eq!(val.inner().read().authorities, a2); + } } diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 82f24ff05b3d5f028c6485d7f7daf02beb64f695..7daa121513416087b8c25d4664acc8e25acaffc4 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -58,6 +58,7 @@ use gossip::{ use sp_finality_grandpa::{ AuthorityPair, AuthorityId, AuthoritySignature, SetId as SetIdNumber, RoundNumber, }; +use sp_utils::mpsc::TracingUnboundedReceiver; pub mod gossip; mod periodic; @@ -165,7 +166,7 @@ pub(crate) struct NetworkBridge> { // thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given that it is // just an `UnboundedReceiver`, one could also switch to a multi-producer-*multi*-consumer // channel implementation. - gossip_validator_report_stream: Arc>>, + gossip_validator_report_stream: Arc>>, } impl> Unpin for NetworkBridge {} @@ -451,8 +452,9 @@ impl> Future for NetworkBridge { } match self.gossip_engine.lock().poll_unpin(cx) { - // The gossip engine future finished. We should do the same. - Poll::Ready(()) => return Poll::Ready(Ok(())), + Poll::Ready(()) => return Poll::Ready( + Err(Error::Network("Gossip engine future finished.".into())) + ), Poll::Pending => {}, } @@ -474,12 +476,12 @@ fn incoming_global( gossip_validator: &Arc>, voters: &VoterSet, | { - let precommits_signed_by: Vec = - msg.message.auth_data.iter().map(move |(_, a)| { - format!("{}", a) - }).collect(); - if voters.len() <= TELEMETRY_VOTERS_LIMIT { + let precommits_signed_by: Vec = + msg.message.auth_data.iter().map(move |(_, a)| { + format!("{}", a) + }).collect(); + telemetry!(CONSENSUS_INFO; "afg.received_commit"; "contains_precommits_signed_by" => ?precommits_signed_by, "target_number" => ?msg.message.target_number.clone(), diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index f2e79e8f14486e5bb0951acfd701e44aaec5a28c..f894624bdf79e6e000f7a113c804517633301cb9 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -17,9 +17,10 @@ //! Periodic rebroadcast of neighbor packets. use futures_timer::Delay; -use futures::{channel::mpsc, future::{FutureExt as _}, prelude::*, ready, stream::Stream}; +use futures::{future::{FutureExt as _}, prelude::*, ready, stream::Stream}; use log::debug; use std::{pin::Pin, task::{Context, Poll}, time::Duration}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sc_network::PeerId; use sp_runtime::traits::{NumberFor, Block as BlockT}; @@ -31,7 +32,7 @@ const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); /// A sender used to send neighbor packets to a background job. 
#[derive(Clone)] pub(super) struct NeighborPacketSender( - mpsc::UnboundedSender<(Vec, NeighborPacket>)> + TracingUnboundedSender<(Vec, NeighborPacket>)> ); impl NeighborPacketSender { @@ -54,14 +55,15 @@ impl NeighborPacketSender { pub(super) struct NeighborPacketWorker { last: Option<(Vec, NeighborPacket>)>, delay: Delay, - rx: mpsc::UnboundedReceiver<(Vec, NeighborPacket>)>, + rx: TracingUnboundedReceiver<(Vec, NeighborPacket>)>, } impl Unpin for NeighborPacketWorker {} impl NeighborPacketWorker { pub(super) fn new() -> (Self, NeighborPacketSender){ - let (tx, rx) = mpsc::unbounded::<(Vec, NeighborPacket>)>(); + let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)> + ("mpsc_grandpa_neighbor_packet_worker"); let delay = Delay::new(REBROADCAST_AFTER); (NeighborPacketWorker { diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index f964cea469c3f3435e520e3370e8bdfb1c1e06ee..ea995eff6390dd789632b0635fdd21b598561478 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -16,9 +16,9 @@ //! Tests for the communication portion of the GRANDPA crate. -use futures::channel::mpsc; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use futures::prelude::*; -use sc_network::{Event as NetworkEvent, PeerId, config::Roles}; +use sc_network::{Event as NetworkEvent, ObservedRole, PeerId}; use sc_network_test::{Block, Hash}; use sc_network_gossip::Validator; use std::sync::Arc; @@ -33,7 +33,7 @@ use super::{AuthorityId, VoterSet, Round, SetId}; #[derive(Debug)] pub(crate) enum Event { - EventStream(mpsc::UnboundedSender), + EventStream(TracingUnboundedSender), WriteNotification(sc_network::PeerId, Vec), Report(sc_network::PeerId, sc_network::ReputationChange), Announce(Hash), @@ -41,12 +41,12 @@ pub(crate) enum Event { #[derive(Clone)] pub(crate) struct TestNetwork { - sender: mpsc::UnboundedSender, + sender: TracingUnboundedSender, } impl sc_network_gossip::Network for TestNetwork { fn event_stream(&self) -> Pin + Send>> { - let (tx, rx) = mpsc::unbounded(); + let (tx, rx) = tracing_unbounded("test"); let _ = self.sender.unbounded_send(Event::EventStream(tx)); Box::pin(rx) } @@ -97,7 +97,7 @@ impl sc_network_gossip::ValidatorContext for TestNetwork { pub(crate) struct Tester { pub(crate) net_handle: super::NetworkBridge, gossip_validator: Arc>, - pub(crate) events: mpsc::UnboundedReceiver, + pub(crate) events: TracingUnboundedReceiver, } impl Tester { @@ -161,7 +161,7 @@ pub(crate) fn make_test_network() -> ( impl Future, TestNetwork, ) { - let (tx, rx) = mpsc::unbounded(); + let (tx, rx) = tracing_unbounded("test"); let net = TestNetwork { sender: tx }; #[derive(Clone)] @@ -256,7 +256,7 @@ fn good_commit_leads_to_relay() { let test = make_test_network().0 .then(move |tester| { // register a peer. 
- tester.gossip_validator.new_peer(&mut NoopContext, &id, sc_network::config::Roles::FULL); + tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); future::ready((tester, id)) }) .then(move |(tester, id)| { @@ -284,7 +284,7 @@ fn good_commit_leads_to_relay() { let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: sender_id.clone(), engine_id: GRANDPA_ENGINE_ID, - roles: Roles::FULL, + role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { @@ -297,7 +297,7 @@ fn good_commit_leads_to_relay() { let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: receiver_id.clone(), engine_id: GRANDPA_ENGINE_ID, - roles: Roles::FULL, + role: ObservedRole::Full, }); // Announce its local set has being on the current set id through a neighbor @@ -357,7 +357,7 @@ fn good_commit_leads_to_relay() { #[test] fn bad_commit_leads_to_report() { - env_logger::init(); + let _ = env_logger::try_init(); let private = [Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let public = make_ids(&private[..]); let voter_set = Arc::new(public.iter().cloned().collect::>()); @@ -404,7 +404,7 @@ fn bad_commit_leads_to_report() { let test = make_test_network().0 .map(move |tester| { // register a peer. - tester.gossip_validator.new_peer(&mut NoopContext, &id, sc_network::config::Roles::FULL); + tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); (tester, id) }) .then(move |(tester, id)| { @@ -431,7 +431,7 @@ fn bad_commit_leads_to_report() { let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { remote: sender_id.clone(), engine_id: GRANDPA_ENGINE_ID, - roles: Roles::FULL, + role: ObservedRole::Full, }); let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { remote: sender_id.clone(), @@ -482,7 +482,7 @@ fn peer_with_higher_view_leads_to_catch_up_request() { let test = tester .map(move |tester| { // register a peer with authority role. - tester.gossip_validator.new_peer(&mut NoopContext, &id, sc_network::config::Roles::AUTHORITY); + tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Authority); (tester, id) }) .then(move |(tester, id)| { diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index a0f37f20cb390926cb6c43662e1370772863313b..d3bbc1adb3cb951b2ce18df858e50a5b24b4731a 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -20,7 +20,7 @@ use std::pin::Pin; use std::sync::Arc; use std::time::Duration; -use log::{debug, warn, info}; +use log::{debug, warn}; use parity_scale_codec::{Decode, Encode}; use futures::prelude::*; use futures_timer::Delay; @@ -636,6 +636,7 @@ where self.client.clone(), incoming, "round", + None, ).map_err(Into::into)); // schedule network message cleanup when sink drops. 
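The hunks below thread a new `initial_sync: bool` flag from block import down into `finalize_block`, and switch the "Applying GRANDPA set change" messages from `info!` to a new `afg_log!` macro (defined later in this patch in `client/finality-grandpa/src/lib.rs`) so they drop to debug level while the node is still doing its initial sync. As a rough sketch based on the macro body shown further down, a call such as `afg_log!(initial_sync, "👴 Applying GRANDPA set change to new set {:?}", set_ref)` behaves like:

    let log_level = if initial_sync { log::Level::Debug } else { log::Level::Info };
    log::log!(target: "afg", log_level, "👴 Applying GRANDPA set change to new set {:?}", set_ref);
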
@@ -910,6 +911,7 @@ where hash, number, (round, commit).into(), + false, ) } @@ -969,6 +971,7 @@ pub(crate) fn finalize_block( hash: Block::Hash, number: NumberFor, justification_or_commit: JustificationOrCommit, + initial_sync: bool, ) -> Result<(), CommandOrError>> where Block: BlockT, BE: Backend, @@ -1010,6 +1013,7 @@ pub(crate) fn finalize_block( hash, number, &is_descendent_of::(&*client, None), + initial_sync, ).map_err(|e| Error::Safety(e.to_string()))?; // check if this is this is the first finalization of some consensus changes @@ -1090,9 +1094,15 @@ pub(crate) fn finalize_block( let (new_id, set_ref) = authority_set.current(); if set_ref.len() > 16 { - info!("Applying GRANDPA set change to new set with {} authorities", set_ref.len()); + afg_log!(initial_sync, + "👴 Applying GRANDPA set change to new set with {} authorities", + set_ref.len(), + ); } else { - info!("Applying GRANDPA set change to new set {:?}", set_ref); + afg_log!(initial_sync, + "👴 Applying GRANDPA set change to new set {:?}", + set_ref, + ); } telemetry!(CONSENSUS_INFO; "afg.generating_new_authority_set"; diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index ea1deccdafbf5d4a428947917b797779ff0957fe..c1e32dfa6cc9e3847c18030f6f88c666d182a611 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -16,21 +16,21 @@ use std::{sync::Arc, collections::HashMap}; -use log::{debug, trace, info}; +use log::{debug, trace}; use parity_scale_codec::Encode; -use futures::channel::mpsc; use parking_lot::RwLockWriteGuard; use sp_blockchain::{BlockStatus, well_known_cache_keys}; use sc_client_api::{backend::Backend, utils::is_descendent_of}; +use sp_utils::mpsc::TracingUnboundedSender; use sp_api::{TransactionFor}; use sp_consensus::{ BlockImport, Error as ConsensusError, - BlockCheckParams, BlockImportParams, ImportResult, JustificationImport, + BlockCheckParams, BlockImportParams, BlockOrigin, ImportResult, JustificationImport, SelectChain, }; -use sp_finality_grandpa::{GRANDPA_ENGINE_ID, ScheduledChange, ConsensusLog}; +use sp_finality_grandpa::{ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; use sp_runtime::Justification; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; use sp_runtime::traits::{ @@ -57,8 +57,9 @@ pub struct GrandpaBlockImport { inner: Arc, select_chain: SC, authority_set: SharedAuthoritySet>, - send_voter_commands: mpsc::UnboundedSender>>, + send_voter_commands: TracingUnboundedSender>>, consensus_changes: SharedConsensusChanges>, + authority_set_hard_forks: HashMap>>, _phantom: PhantomData, } @@ -72,6 +73,7 @@ impl Clone for authority_set: self.authority_set.clone(), send_voter_commands: self.send_voter_commands.clone(), consensus_changes: self.consensus_changes.clone(), + authority_set_hard_forks: self.authority_set_hard_forks.clone(), _phantom: PhantomData, } } @@ -126,7 +128,12 @@ impl JustificationImport number: NumberFor, justification: Justification, ) -> Result<(), Self::Error> { - GrandpaBlockImport::import_justification(self, hash, number, justification, false) + // this justification was requested by the sync service, therefore we + // are not sure if it should enact a change or not. it could have been a + // request made as part of initial sync but that means the justification + // wasn't part of the block and was requested asynchronously, probably + // makes sense to log in that case. 
+ GrandpaBlockImport::import_justification(self, hash, number, justification, false, false) } } @@ -212,9 +219,16 @@ where Client: crate::ClientForGrandpa, { // check for a new authority set change. - fn check_new_change(&self, header: &Block::Header, hash: Block::Hash) - -> Option>> - { + fn check_new_change( + &self, + header: &Block::Header, + hash: Block::Hash, + ) -> Option>> { + // check for forced authority set hard forks + if let Some(change) = self.authority_set_hard_forks.get(&hash) { + return Some(change.clone()); + } + // check for forced change. if let Some((median_last_finalized, change)) = find_forced_change::(header) { return Some(PendingChange { @@ -241,6 +255,7 @@ where &self, block: &mut BlockImportParams>, hash: Block::Hash, + initial_sync: bool, ) -> Result, ConsensusError> { // when we update the authorities, we need to hold the lock // until the block is written to prevent a race if we need to restore @@ -315,7 +330,9 @@ where } let applied_changes = { - let forced_change_set = guard.as_mut().apply_forced_changes(hash, number, &is_descendent_of) + let forced_change_set = guard + .as_mut() + .apply_forced_changes(hash, number, &is_descendent_of, initial_sync) .map_err(|e| ConsensusError::ClientImport(e.to_string())) .map_err(ConsensusError::from)?; @@ -410,7 +427,10 @@ impl BlockImport Err(e) => return Err(ConsensusError::ClientImport(e.to_string()).into()), } - let pending_changes = self.make_authorities_changes(&mut block, hash)?; + // on initial sync we will restrict logging under info to avoid spam. + let initial_sync = block.origin == BlockOrigin::NetworkInitialSync; + + let pending_changes = self.make_authorities_changes(&mut block, hash, initial_sync)?; // we don't want to finalize on `inner.import_block` let mut justification = block.justification.take(); @@ -483,7 +503,15 @@ impl BlockImport match justification { Some(justification) => { - self.import_justification(hash, number, justification, needs_justification).unwrap_or_else(|err| { + let import_res = self.import_justification( + hash, + number, + justification, + needs_justification, + initial_sync, + ); + + import_res.unwrap_or_else(|err| { if needs_justification || enacts_consensus_change { debug!(target: "afg", "Imported block #{} that enacts authority set change with \ invalid justification: {:?}, requesting justification from peers.", number, err); @@ -527,15 +555,52 @@ impl GrandpaBlockImport, select_chain: SC, authority_set: SharedAuthoritySet>, - send_voter_commands: mpsc::UnboundedSender>>, + send_voter_commands: TracingUnboundedSender>>, consensus_changes: SharedConsensusChanges>, + authority_set_hard_forks: Vec<(SetId, PendingChange>)>, ) -> GrandpaBlockImport { + // check for and apply any forced authority set hard fork that applies + // to the *current* authority set. + if let Some((_, change)) = authority_set_hard_forks + .iter() + .find(|(set_id, _)| *set_id == authority_set.set_id()) + { + let mut authority_set = authority_set.inner().write(); + authority_set.current_authorities = change.next_authorities.clone(); + } + + // index authority set hard forks by block hash so that they can be used + // by any node syncing the chain and importing a block hard fork + // authority set changes. 
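For downstream chains, the entry point for these hard forks is the new `block_import_with_authority_set_hard_forks` constructor added further down in `lib.rs`. A minimal sketch of wiring one in, where the set id, fork block hash/number and replacement authority list are hypothetical placeholders for chain-specific values:

    // Hypothetical fork parameters; the real values depend on the chain being forked.
    let hard_fork = (
        42u64,                                  // SetId the forced change applies to
        (fork_block_hash, fork_block_number),   // block at which the change was signalled
        new_authorities.clone(),                // static AuthorityList to switch to
    );

    let (grandpa_block_import, grandpa_link) = block_import_with_authority_set_hard_forks(
        client.clone(),
        &*genesis_authorities_provider,
        select_chain.clone(),
        vec![hard_fork],
    )?;
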
+ let authority_set_hard_forks = authority_set_hard_forks + .into_iter() + .map(|(_, change)| (change.canon_hash, change)) + .collect::>(); + + // check for and apply any forced authority set hard fork that apply to + // any *pending* standard changes, checking by the block hash at which + // they were announced. + { + let mut authority_set = authority_set.inner().write(); + + authority_set.pending_standard_changes = authority_set + .pending_standard_changes + .clone() + .map(&mut |hash, _, original| { + authority_set_hard_forks + .get(&hash) + .cloned() + .unwrap_or(original) + }); + } + GrandpaBlockImport { inner, select_chain, authority_set, send_voter_commands, consensus_changes, + authority_set_hard_forks, _phantom: PhantomData, } } @@ -558,6 +623,7 @@ where number: NumberFor, justification: Justification, enacts_change: bool, + initial_sync: bool, ) -> Result<(), ConsensusError> { let justification = GrandpaJustification::decode_and_verify_finalizes( &justification, @@ -579,12 +645,17 @@ where hash, number, justification.into(), + initial_sync, ); match result { Err(CommandOrError::VoterCommand(command)) => { - info!(target: "afg", "Imported justification for block #{} that triggers \ - command {}, signaling voter.", number, command); + afg_log!(initial_sync, + "👴 Imported justification for block #{} that triggers \ + command {}, signaling voter.", + number, + command, + ); // send the command to the voter let _ = self.send_voter_commands.unbounded_send(command); diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 91da62848c534b7db461b82e01b0678a288a642a..6fab89ac68acb1f03ed742df06724b62ffb2d54a 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -55,7 +55,6 @@ use futures::prelude::*; use futures::StreamExt; use log::{debug, info}; -use futures::channel::mpsc; use sc_client_api::{ backend::{AuxStore, Backend}, LockImportRun, BlockchainEvents, CallExecutor, @@ -63,12 +62,14 @@ use sc_client_api::{ }; use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata}; use parity_scale_codec::{Decode, Encode}; +use prometheus_endpoint::{PrometheusError, Registry}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{NumberFor, Block as BlockT, DigestFor, Zero}; use sc_keystore::KeyStorePtr; use sp_inherents::InherentDataProviders; use sp_consensus::{SelectChain, BlockImport}; use sp_core::Pair; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; use sc_telemetry::{telemetry, CONSENSUS_INFO, CONSENSUS_DEBUG}; use serde_json; @@ -83,6 +84,23 @@ use std::time::Duration; use std::pin::Pin; use std::task::{Poll, Context}; +// utility logging macro that takes as first argument a conditional to +// decide whether to log under debug or info level (useful to restrict +// logging under initial sync). +macro_rules! afg_log { + ($condition:expr, $($msg: expr),+ $(,)?) 
=> { + { + let log_level = if $condition { + log::Level::Debug + } else { + log::Level::Info + }; + + log::log!(target: "afg", log_level, $($msg),+); + } + }; +} + mod authorities; mod aux_schema; mod communication; @@ -104,7 +122,7 @@ pub use voting_rule::{ }; use aux_schema::PersistentData; -use environment::{Environment, VoterSetState, Metrics}; +use environment::{Environment, VoterSetState}; use import::GrandpaBlockImport; use until_imported::UntilGlobalMessageBlocksImported; use communication::{NetworkBridge, Network as NetworkT}; @@ -378,7 +396,7 @@ pub struct LinkHalf { client: Arc, select_chain: SC, persistent_data: PersistentData, - voter_commands_rx: mpsc::UnboundedReceiver>>, + voter_commands_rx: TracingUnboundedReceiver>>, } /// Provider for the Grandpa authority set configured on the genesis block. @@ -417,10 +435,43 @@ pub fn block_import( client: Arc, genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, select_chain: SC, -) -> Result<( +) -> Result< + ( GrandpaBlockImport, LinkHalf, - ), ClientError> + ), + ClientError, +> +where + SC: SelectChain, + BE: Backend + 'static, + Client: ClientForGrandpa + 'static, +{ + block_import_with_authority_set_hard_forks( + client, + genesis_authorities_provider, + select_chain, + Default::default(), + ) +} + +/// Make block importer and link half necessary to tie the background voter to +/// it. A vector of authority set hard forks can be passed, any authority set +/// change signaled at the given block (either already signalled or in a further +/// block when importing it) will be replaced by a standard change with the +/// given static authorities. +pub fn block_import_with_authority_set_hard_forks( + client: Arc, + genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, + select_chain: SC, + authority_set_hard_forks: Vec<(SetId, (Block::Hash, NumberFor), AuthorityList)>, +) -> Result< + ( + GrandpaBlockImport, + LinkHalf, + ), + ClientError, +> where SC: SelectChain, BE: Backend + 'static, @@ -442,7 +493,25 @@ where } )?; - let (voter_commands_tx, voter_commands_rx) = mpsc::unbounded(); + let (voter_commands_tx, voter_commands_rx) = tracing_unbounded("mpsc_grandpa_voter_command"); + + // create pending change objects with 0 delay and enacted on finality + // (i.e. standard changes) for each authority set hard fork. + let authority_set_hard_forks = authority_set_hard_forks + .into_iter() + .map(|(set_id, (hash, number), authorities)| { + ( + set_id, + authorities::PendingChange { + next_authorities: authorities, + delay: Zero::zero(), + canon_hash: hash, + canon_height: number, + delay_kind: authorities::DelayKind::Finalized, + }, + ) + }) + .collect(); Ok(( GrandpaBlockImport::new( @@ -451,6 +520,7 @@ where persistent_data.authority_set.clone(), voter_commands_tx, persistent_data.consensus_changes.clone(), + authority_set_hard_forks, ), LinkHalf { client, @@ -467,6 +537,7 @@ fn global_communication( client: Arc, network: &NetworkBridge, keystore: &Option, + metrics: Option, ) -> ( impl Stream< Item = Result, CommandOrError>>, @@ -497,6 +568,7 @@ fn global_communication( client.clone(), global_in, "global", + metrics, ); let global_in = global_in.map_err(CommandOrError::from); @@ -543,7 +615,7 @@ pub struct GrandpaParams { /// The inherent data providers. pub inherent_data_providers: InherentDataProviders, /// If supplied, can be used to hook on telemetry connection established events. 
- pub telemetry_on_connect: Option>, + pub telemetry_on_connect: Option>, /// A voting rule used to potentially restrict target votes. pub voting_rule: VR, /// The prometheus metrics registry. @@ -644,13 +716,30 @@ pub fn run_grandpa_voter( Ok(future::select(voter_work, telemetry_task).map(drop)) } +struct Metrics { + environment: environment::Metrics, + until_imported: until_imported::Metrics, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Metrics { + environment: environment::Metrics::register(registry)?, + until_imported: until_imported::Metrics::register(registry)?, + }) + } +} + /// Future that powers the voter. #[must_use] struct VoterWork, SC, VR> { voter: Pin>>> + Send>>, env: Arc>, - voter_commands_rx: mpsc::UnboundedReceiver>>, + voter_commands_rx: TracingUnboundedReceiver>>, network: NetworkBridge, + + /// Prometheus metrics. + metrics: Option, } impl VoterWork @@ -670,9 +759,17 @@ where select_chain: SC, voting_rule: VR, persistent_data: PersistentData, - voter_commands_rx: mpsc::UnboundedReceiver>>, + voter_commands_rx: TracingUnboundedReceiver>>, prometheus_registry: Option, ) -> Self { + let metrics = match prometheus_registry.as_ref().map(Metrics::register) { + Some(Ok(metrics)) => Some(metrics), + Some(Err(e)) => { + debug!(target: "afg", "Failed to register metrics: {:?}", e); + None + } + None => None, + }; let voters = persistent_data.authority_set.current_authorities(); let env = Arc::new(Environment { @@ -686,10 +783,7 @@ where authority_set: persistent_data.authority_set.clone(), consensus_changes: persistent_data.consensus_changes.clone(), voter_set_state: persistent_data.set_state.clone(), - metrics: prometheus_registry.map(|registry| { - Metrics::register(®istry) - .expect("Other metrics would have failed to register before these; qed") - }), + metrics: metrics.as_ref().map(|m| m.environment.clone()), _phantom: PhantomData, }); @@ -700,6 +794,7 @@ where env, voter_commands_rx, network, + metrics, }; work.rebuild_voter(); work @@ -748,6 +843,7 @@ where self.env.client.clone(), &self.env.network, &self.env.config.keystore, + self.metrics.as_ref().map(|m| m.until_imported.clone()), ); let last_completed_round = completed_rounds.last(); diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 971c2132904f6b325d6f76934a133fd178e87c0d..1e6c8ddf18820e4de9e56e26c15d53491c286889 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -18,7 +18,7 @@ use std::pin::Pin; use std::sync::Arc; use std::task::{Context, Poll}; -use futures::{prelude::*, channel::mpsc}; +use futures::prelude::*; use finality_grandpa::{ BlockNumberOps, Error as GrandpaError, voter, voter_set::VoterSet @@ -27,8 +27,10 @@ use log::{debug, info, warn}; use sp_consensus::SelectChain; use sc_client_api::backend::Backend; +use sp_utils::mpsc::TracingUnboundedReceiver; use sp_runtime::traits::{NumberFor, Block as BlockT}; use sp_blockchain::HeaderMetadata; + use crate::{ global_communication, CommandOrError, CommunicationIn, Config, environment, LinkHalf, Error, aux_schema::PersistentData, VoterCommand, VoterSetState, @@ -122,6 +124,7 @@ fn grandpa_observer( finalized_hash, finalized_number, (round, commit).into(), + false, ) { Ok(_) => {}, Err(e) => return future::err(e), @@ -206,7 +209,7 @@ struct ObserverWork> { network: NetworkBridge, persistent_data: PersistentData, keystore: Option, - voter_commands_rx: mpsc::UnboundedReceiver>>, + voter_commands_rx: TracingUnboundedReceiver>>, _phantom: 
PhantomData, } @@ -223,7 +226,7 @@ where network: NetworkBridge, persistent_data: PersistentData, keystore: Option, - voter_commands_rx: mpsc::UnboundedReceiver>>, + voter_commands_rx: TracingUnboundedReceiver>>, ) -> Self { let mut work = ObserverWork { @@ -255,6 +258,7 @@ where self.client.clone(), &self.network, &self.keystore, + None, ); let last_finalized_number = self.client.info().finalized_number; @@ -375,6 +379,7 @@ mod tests { use super::*; use assert_matches::assert_matches; + use sp_utils::mpsc::tracing_unbounded; use crate::{aux_schema, communication::tests::{Event, make_test_network}}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; use sc_network::PeerId; @@ -411,7 +416,7 @@ mod tests { || Ok(vec![]), ).unwrap(); - let (_tx, voter_command_rx) = mpsc::unbounded(); + let (_tx, voter_command_rx) = tracing_unbounded(""); let observer = ObserverWork::new( client, tester.net_handle.clone(), @@ -425,21 +430,16 @@ mod tests { tester.trigger_gossip_validator_reputation_change(&peer_id); executor::block_on(async move { + // Poll the observer once and have it forward the reputation change from the gossip + // validator to the test network. + assert!(observer.now_or_never().is_none()); + // Ignore initial event stream request by gossip engine. match tester.events.next().now_or_never() { Some(Some(Event::EventStream(_))) => {}, _ => panic!("expected event stream request"), }; - assert!( - tester.events.next().now_or_never().is_none(), - "expect no further network events", - ); - - // Poll the observer once and have it forward the reputation change from the gossip - // validator to the test network. - assert!(observer.now_or_never().is_none()); - assert_matches!(tester.events.next().now_or_never(), Some(Some(Event::Report(_, _)))); }); } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 50800f478476f02d36a346a519af9bb081d18dad..d7d1d1e48d3a0f0f051b1c4a35f4a044ea8ae2a7 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -22,7 +22,7 @@ use sc_network_test::{ Block, Hash, TestNetFactory, BlockImportAdapter, Peer, PeersClient, PassThroughVerifier, PeersFullClient, }; -use sc_network::config::{ProtocolConfig, Roles, BoxFinalityProofRequestBuilder}; +use sc_network::config::{ProtocolConfig, BoxFinalityProofRequestBuilder}; use parking_lot::Mutex; use futures_timer::Delay; use tokio::runtime::{Runtime, Handle}; @@ -30,21 +30,17 @@ use sp_keyring::Ed25519Keyring; use sc_client::LongestChain; use sc_client_api::backend::TransactionFor; use sp_blockchain::Result; -use sp_api::{ApiRef, ApiErrorExt, Core, RuntimeVersion, ApiExt, StorageProof, ProvideRuntimeApi}; +use sp_api::{ApiRef, StorageProof, ProvideRuntimeApi}; use substrate_test_runtime_client::runtime::BlockNumber; use sp_consensus::{ BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, import_queue::{BoxJustificationImport, BoxFinalityProofImport}, }; -use std::{ - collections::{HashMap, HashSet}, - result, - pin::Pin, -}; +use std::{collections::{HashMap, HashSet}, pin::Pin}; use parity_scale_codec::Decode; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HashFor}; use sp_runtime::generic::{BlockId, DigestItem}; -use sp_core::{H256, NativeOrEncoded, ExecutionContext, crypto::Public}; +use sp_core::{H256, crypto::Public}; use sp_finality_grandpa::{GRANDPA_ENGINE_ID, AuthorityList, GrandpaApi}; use sp_state_machine::{InMemoryBackend, prove_read, read_proof_check}; @@ -78,9 
+74,8 @@ impl GrandpaTestNet { peers: Vec::with_capacity(n_peers), test_config, }; - let config = Self::default_config(); for _ in 0..n_peers { - net.add_full_peer(&config); + net.add_full_peer(); } net } @@ -99,10 +94,8 @@ impl TestNetFactory for GrandpaTestNet { } fn default_config() -> ProtocolConfig { - // the authority role ensures gossip hits all nodes here. - let mut config = ProtocolConfig::default(); - config.roles = Roles::AUTHORITY; - config + // This is unused. + ProtocolConfig::default() } fn make_verifier( @@ -214,87 +207,13 @@ impl ProvideRuntimeApi for TestApi { } } -impl Core for RuntimeApi { - fn Core_version_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option<()>, - _: Vec, - ) -> Result> { - unimplemented!("Not required for testing!") - } - - fn Core_execute_block_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option, - _: Vec, - ) -> Result> { - unimplemented!("Not required for testing!") - } +sp_api::mock_impl_runtime_apis! { + impl GrandpaApi for RuntimeApi { + type Error = sp_blockchain::Error; - fn Core_initialize_block_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option<&::Header>, - _: Vec, - ) -> Result> { - unimplemented!("Not required for testing!") - } -} - -impl ApiErrorExt for RuntimeApi { - type Error = sp_blockchain::Error; -} - -impl ApiExt for RuntimeApi { - type StateBackend = < - substrate_test_runtime_client::Backend as sc_client_api::backend::Backend - >::State; - - fn map_api_result result::Result, R, E>( - &self, - _: F - ) -> result::Result { - unimplemented!("Not required for testing!") - } - - fn runtime_version_at(&self, _: &BlockId) -> Result { - unimplemented!("Not required for testing!") - } - - fn record_proof(&mut self) { - unimplemented!("Not required for testing!") - } - - fn extract_proof(&mut self) -> Option { - unimplemented!("Not required for testing!") - } - - fn into_storage_changes( - &self, - _: &Self::StateBackend, - _: Option<&sp_api::ChangesTrieState, sp_api::NumberFor>>, - _: ::Hash, - ) -> std::result::Result, String> - where Self: Sized - { - unimplemented!("Not required for testing!") - } -} - -impl GrandpaApi for RuntimeApi { - fn GrandpaApi_grandpa_authorities_runtime_api_impl( - &self, - _: &BlockId, - _: ExecutionContext, - _: Option<()>, - _: Vec, - ) -> Result> { - Ok(self.inner.genesis_authorities.clone()).map(NativeOrEncoded::Native) + fn grandpa_authorities(&self) -> AuthorityList { + self.inner.genesis_authorities.clone() + } } } @@ -1074,7 +993,7 @@ fn voter_persists_its_votes() { use std::iter::FromIterator; use std::sync::atomic::{AtomicUsize, Ordering}; use futures::future; - use futures::channel::mpsc; + use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; let _ = env_logger::try_init(); let mut runtime = Runtime::new().unwrap(); @@ -1099,7 +1018,7 @@ fn voter_persists_its_votes() { // channel between the voter and the main controller. // sending a message on the `voter_tx` restarts the voter. 
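In the test below, the restart channel switches from `futures::channel::mpsc::unbounded` to `sp_utils::mpsc::tracing_unbounded`, the same swap made throughout this patch. The tracing variant behaves like a plain unbounded mpsc channel but carries a static name used for instrumentation; in isolation the pattern looks roughly like this (the channel name and payload type here are arbitrary examples):

    use futures::StreamExt;
    use sp_utils::mpsc::tracing_unbounded;

    // The &'static str only names the channel for tracing purposes.
    let (tx, mut rx) = tracing_unbounded::<u32>("mpsc_example_channel");
    tx.unbounded_send(1).expect("receiver is alive");

    futures::executor::block_on(async move {
        assert_eq!(rx.next().await, Some(1));
    });
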
- let (voter_tx, voter_rx) = mpsc::unbounded::<()>(); + let (voter_tx, voter_rx) = tracing_unbounded::<()>(""); let mut keystore_paths = Vec::new(); @@ -1112,7 +1031,7 @@ fn voter_persists_its_votes() { struct ResettableVoter { voter: Pin + Send + Unpin>>, - voter_rx: mpsc::UnboundedReceiver<()>, + voter_rx: TracingUnboundedReceiver<()>, net: Arc>, client: PeersClient, keystore: KeyStorePtr, @@ -1381,7 +1300,7 @@ fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { let peers = &[Ed25519Keyring::Alice]; let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); - net.add_light_peer(&GrandpaTestNet::default_config()); + net.add_light_peer(); // import block#1 WITH consensus data change. Light client ignores justification // && instead fetches finality proof for block #1 @@ -1458,7 +1377,7 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ run_to_completion(&mut runtime, 11, net.clone(), peers_a); // request finalization by light client - net.lock().add_light_peer(&GrandpaTestNet::default_config()); + net.lock().add_light_peer(); net.lock().block_until_sync(); // check block, finalized on light client diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index c3a52fcf56fe197602ba844e61500686820f42cf..40da7707b6735262d115015422ef3b01c69eebff 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -29,63 +29,134 @@ use super::{ }; use log::{debug, warn}; -use sc_client_api::{BlockImportNotification, ImportNotifications}; +use sp_utils::mpsc::TracingUnboundedReceiver; use futures::prelude::*; use futures::stream::Fuse; use futures_timer::Delay; -use futures::channel::mpsc::UnboundedReceiver; use finality_grandpa::voter; use parking_lot::Mutex; +use prometheus_endpoint::{ + Gauge, U64, PrometheusError, register, Registry, +}; +use sc_client_api::{BlockImportNotification, ImportNotifications}; +use sp_finality_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::collections::{HashMap, VecDeque}; use std::pin::Pin; -use std::sync::{atomic::{AtomicUsize, Ordering}, Arc}; +use std::sync::Arc; use std::task::{Context, Poll}; use std::time::{Duration, Instant}; -use sp_finality_grandpa::AuthorityId; const LOG_PENDING_INTERVAL: Duration = Duration::from_secs(15); -// something which will block until imported. +/// Something that needs to be withheld until specific blocks are available. +/// +/// For example a GRANDPA commit message which is not of any use without the corresponding block +/// that it commits on. pub(crate) trait BlockUntilImported: Sized { - // the type that is blocked on. + /// The type that is blocked on. type Blocked; - /// new incoming item. For all internal items, - /// check if they require to be waited for. - /// if so, call the `Wait` closure. - /// if they are ready, call the `Ready` closure. - fn schedule_wait( + /// Check if a new incoming item needs awaiting until a block(s) is imported. + fn needs_waiting>( input: Self::Blocked, status_check: &S, - wait: Wait, - ready: Ready, - ) -> Result<(), Error> where - S: BlockStatusT, - Wait: FnMut(Block::Hash, NumberFor, Self), - Ready: FnMut(Self::Blocked); + ) -> Result, Error>; /// called when the wait has completed. The canonical number is passed through /// for further checks. 
fn wait_completed(self, canon_number: NumberFor) -> Option; } +/// Describes whether a given [`BlockUntilImported`] (a) should be discarded, (b) is waiting for +/// specific blocks to be imported or (c) is ready to be used. +/// +/// A reason for discarding a [`BlockUntilImported`] would be if a referenced block is perceived +/// under a different number than specified in the message. +pub(crate) enum DiscardWaitOrReady { + Discard, + Wait(Vec<(Block::Hash, NumberFor, W)>), + Ready(R), +} + +/// Prometheus metrics for the `UntilImported` queue. +// +// At a given point in time there can be more than one `UntilImported` queue. One can not register a +// metric twice, thus queues need to share the same Prometheus metrics instead of instantiating +// their own ones. +// +// When a queue is dropped it might still contain messages. In order for those to not distort the +// Prometheus metrics, the `Metric` struct cleans up after itself within its `Drop` implementation +// by subtracting the local_waiting_messages (the amount of messages left in the queue about to +// be dropped) from the global_waiting_messages gauge. +pub(crate) struct Metrics { + global_waiting_messages: Gauge, + local_waiting_messages: u64, +} + +impl Metrics { + pub(crate) fn register(registry: &Registry) -> Result { + Ok(Self { + global_waiting_messages: register(Gauge::new( + "finality_grandpa_until_imported_waiting_messages_number", + "Number of finality grandpa messages waiting within the until imported queue.", + )?, registry)?, + local_waiting_messages: 0, + }) + } + + fn waiting_messages_inc(&mut self) { + self.local_waiting_messages += 1; + self.global_waiting_messages.inc(); + } + + fn waiting_messages_dec(&mut self) { + self.local_waiting_messages -= 1; + self.global_waiting_messages.dec(); + } +} + + +impl Clone for Metrics { + fn clone(&self) -> Self { + Metrics { + global_waiting_messages: self.global_waiting_messages.clone(), + // When cloned, reset local_waiting_messages, so the global counter is not reduced a + // second time for the same messages on `drop` of the clone. + local_waiting_messages: 0, + } + } +} + +impl Drop for Metrics { + fn drop(&mut self) { + // Reduce the global counter by the amount of messages that were still left in the dropped + // queue. + self.global_waiting_messages.sub(self.local_waiting_messages) + } +} + /// Buffering imported messages until blocks with given hashes are imported. #[pin_project::pin_project] pub(crate) struct UntilImported> { - import_notifications: Fuse>>, + import_notifications: Fuse>>, block_sync_requester: BlockSyncRequester, status_check: BlockStatus, #[pin] inner: Fuse, ready: VecDeque, + /// Interval at which to check status of each awaited block. check_pending: Pin> + Send>>, /// Mapping block hashes to their block number, the point in time it was /// first encountered (Instant) and a list of GRANDPA messages referencing /// the block hash. pending: HashMap, Instant, Vec)>, + + /// Queue identifier for differentiation in logs. identifier: &'static str, + /// Prometheus metrics. + metrics: Option, } impl UntilImported where @@ -102,6 +173,7 @@ impl UntilImported, ) -> Self { // how often to check if pending messages that are waiting for blocks to be // imported can be checked. @@ -125,6 +197,7 @@ impl UntilImported Stream for UntilImported { // new input: schedule wait of any parts which require // blocks to be known. 
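Under the reworked trait, a `BlockUntilImported` implementation no longer drives `wait`/`ready` callbacks but instead returns a `DiscardWaitOrReady` value describing what should happen to the message. A minimal hypothetical implementation (`MyBlockedItem` and its `target()` accessor are invented for illustration; the real vote and global message implementations follow below in this patch):

    impl<Block: BlockT> BlockUntilImported<Block> for MyBlockedItem<Block> {
        type Blocked = Self;

        fn needs_waiting<S: BlockStatusT<Block>>(
            item: Self::Blocked,
            status_check: &S,
        ) -> Result<DiscardWaitOrReady<Block, Self, Self::Blocked>, Error> {
            let (hash, number) = item.target();
            match status_check.block_number(hash)? {
                // Already imported under the expected number: pass straight through.
                Some(known) if known == number => Ok(DiscardWaitOrReady::Ready(item)),
                // Imported under a different number: the message is bogus, drop it.
                Some(_) => Ok(DiscardWaitOrReady::Discard),
                // Not imported yet: park it until the block shows up.
                None => Ok(DiscardWaitOrReady::Wait(vec![(hash, number, item)])),
            }
        }

        fn wait_completed(self, _canon_number: NumberFor<Block>) -> Option<Self::Blocked> {
            Some(self)
        }
    }
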
- let ready = &mut this.ready; - let pending = &mut this.pending; - M::schedule_wait( - input, - this.status_check, - |target_hash, target_number, wait| pending - .entry(target_hash) - .or_insert_with(|| (target_number, Instant::now(), Vec::new())) - .2 - .push(wait), - |ready_item| ready.push_back(ready_item), - )?; + match M::needs_waiting(input, this.status_check)? { + DiscardWaitOrReady::Discard => {}, + DiscardWaitOrReady::Wait(items) => { + for (target_hash, target_number, wait) in items { + this.pending + .entry(target_hash) + .or_insert_with(|| (target_number, Instant::now(), Vec::new())) + .2 + .push(wait) + } + }, + DiscardWaitOrReady::Ready(item) => this.ready.push_back(item), + } + + if let Some(metrics) = &mut this.metrics { + metrics.waiting_messages_inc(); + } } Poll::Pending => break, } @@ -231,6 +309,9 @@ impl Stream for UntilImported(hash: H, id: AuthorityId) impl BlockUntilImported for SignedMessage { type Blocked = Self; - fn schedule_wait( + fn needs_waiting>( msg: Self::Blocked, status_check: &BlockStatus, - mut wait: Wait, - mut ready: Ready, - ) -> Result<(), Error> where - BlockStatus: BlockStatusT, - Wait: FnMut(Block::Hash, NumberFor, Self), - Ready: FnMut(Self::Blocked), - { + ) -> Result, Error> { let (&target_hash, target_number) = msg.target(); if let Some(number) = status_check.block_number(target_hash)? { if number != target_number { warn_authority_wrong_target(target_hash, msg.id); + return Ok(DiscardWaitOrReady::Discard); } else { - ready(msg); + return Ok(DiscardWaitOrReady::Ready(msg)); } - } else { - wait(target_hash, target_number, msg) } - Ok(()) + return Ok(DiscardWaitOrReady::Wait(vec![(target_hash, target_number, msg)])) } fn wait_completed(self, canon_number: NumberFor) -> Option { @@ -307,8 +381,12 @@ pub(crate) type UntilVoteTargetImported { - inner: Arc<(AtomicUsize, Mutex>>)>, + inner: Arc>>>, target_number: NumberFor, } @@ -317,16 +395,10 @@ impl Unpin for BlockGlobalMessage {} impl BlockUntilImported for BlockGlobalMessage { type Blocked = CommunicationIn; - fn schedule_wait( + fn needs_waiting>( input: Self::Blocked, status_check: &BlockStatus, - mut wait: Wait, - mut ready: Ready, - ) -> Result<(), Error> where - BlockStatus: BlockStatusT, - Wait: FnMut(Block::Hash, NumberFor, Self), - Ready: FnMut(Self::Blocked), - { + ) -> Result, Error> { use std::collections::hash_map::Entry; enum KnownOrUnknown { @@ -344,7 +416,6 @@ impl BlockUntilImported for BlockGlobalMessage { } let mut checked_hashes: HashMap<_, KnownOrUnknown>> = HashMap::new(); - let mut unknown_count = 0; { // returns false when should early exit. @@ -359,7 +430,6 @@ impl BlockUntilImported for BlockGlobalMessage { } else { entry.insert(KnownOrUnknown::Unknown(perceived_number)); - unknown_count += 1; perceived_number } } @@ -384,7 +454,7 @@ impl BlockUntilImported for BlockGlobalMessage { for (target_number, target_hash) in precommit_targets { if !query_known(target_hash, target_number)? { - return Ok(()) + return Ok(DiscardWaitOrReady::Discard); } } }, @@ -402,66 +472,52 @@ impl BlockUntilImported for BlockGlobalMessage { for (target_number, target_hash) in targets { if !query_known(target_hash, target_number)? { - return Ok(()) + return Ok(DiscardWaitOrReady::Discard); } } }, }; } - // none of the hashes in the global message were unknown. - // we can just return the message directly. 
- if unknown_count == 0 { - ready(input); - return Ok(()) + let unknown_hashes = checked_hashes.into_iter().filter_map(|(hash, num)| match num { + KnownOrUnknown::Unknown(number) => Some((hash, number)), + KnownOrUnknown::Known(_) => None, + }).collect::>(); + + if unknown_hashes.is_empty() { + // none of the hashes in the global message were unknown. + // we can just return the message directly. + return Ok(DiscardWaitOrReady::Ready(input)); } - let locked_global = Arc::new((AtomicUsize::new(unknown_count), Mutex::new(Some(input)))); + let locked_global = Arc::new(Mutex::new(Some(input))); + + let items_to_await = unknown_hashes.into_iter().map(|(hash, target_number)| { + (hash, target_number, BlockGlobalMessage { inner: locked_global.clone(), target_number }) + }).collect(); // schedule waits for all unknown messages. // when the last one of these has `wait_completed` called on it, // the global message will be returned. - // - // in the future, we may want to issue sync requests to the network - // if this is taking a long time. - for (hash, is_known) in checked_hashes { - if let KnownOrUnknown::Unknown(target_number) = is_known { - wait(hash, target_number, BlockGlobalMessage { - inner: locked_global.clone(), - target_number, - }) - } - } - - Ok(()) + Ok(DiscardWaitOrReady::Wait(items_to_await)) } fn wait_completed(self, canon_number: NumberFor) -> Option { if self.target_number != canon_number { - // if we return without deducting the counter, then none of the other - // handles can return the commit message. + // Delete the inner message so it won't ever be forwarded. Future calls to + // `wait_completed` on the same `inner` will ignore it. + *self.inner.lock() = None; return None; } - let mut last_count = self.inner.0.load(Ordering::Acquire); - - // CAS loop to ensure that we always have a last reader. - loop { - if last_count == 1 { // we are the last one left. - return self.inner.1.lock().take(); - } - - let prev_value = self.inner.0.compare_and_swap( - last_count, - last_count - 1, - Ordering::SeqCst, - ); - - if prev_value == last_count { - return None; - } else { - last_count = prev_value; - } + match Arc::try_unwrap(self.inner) { + // This is the last reference and thus the last outstanding block to be awaited. `inner` + // is either `Some(_)` or `None`. The latter implies that a previous `wait_completed` + // call witnessed a block number mismatch (see above). + Ok(inner) => Mutex::into_inner(inner), + // There are still other strong references to this `Arc`, thus the message is blocked on + // other blocks to be imported. 
+ Err(_) => None, } } } @@ -485,18 +541,18 @@ mod tests { use sc_client_api::BlockImportNotification; use futures::future::Either; use futures_timer::Delay; - use futures::channel::mpsc; + use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use finality_grandpa::Precommit; #[derive(Clone)] struct TestChainState { - sender: mpsc::UnboundedSender>, + sender: TracingUnboundedSender>, known_blocks: Arc>>, } impl TestChainState { fn new() -> (Self, ImportNotifications) { - let (tx, rx) = mpsc::unbounded(); + let (tx, rx) = tracing_unbounded("test"); let state = TestChainState { sender: tx, known_blocks: Arc::new(Mutex::new(HashMap::new())), @@ -593,7 +649,7 @@ mod tests { // enact all dependencies before importing the message enact_dependencies(&chain_state); - let (global_tx, global_rx) = futures::channel::mpsc::unbounded(); + let (global_tx, global_rx) = tracing_unbounded("test"); let until_imported = UntilGlobalMessageBlocksImported::new( import_notifications, @@ -601,6 +657,7 @@ mod tests { block_status, global_rx, "global", + None, ); global_tx.unbounded_send(msg).unwrap(); @@ -619,7 +676,7 @@ mod tests { let (chain_state, import_notifications) = TestChainState::new(); let block_status = chain_state.block_status(); - let (global_tx, global_rx) = futures::channel::mpsc::unbounded(); + let (global_tx, global_rx) = tracing_unbounded("test"); let until_imported = UntilGlobalMessageBlocksImported::new( import_notifications, @@ -627,6 +684,7 @@ mod tests { block_status, global_rx, "global", + None, ); global_tx.unbounded_send(msg).unwrap(); @@ -871,7 +929,7 @@ mod tests { let (chain_state, import_notifications) = TestChainState::new(); let block_status = chain_state.block_status(); - let (global_tx, global_rx) = futures::channel::mpsc::unbounded(); + let (global_tx, global_rx) = tracing_unbounded("test"); let block_sync_requester = TestBlockSyncRequester::default(); @@ -881,6 +939,7 @@ mod tests { block_status, global_rx, "global", + None, ); let h1 = make_header(5); @@ -941,4 +1000,88 @@ mod tests { futures::executor::block_on(test); } + + fn test_catch_up() -> Arc>>> { + let header = make_header(5); + + let unknown_catch_up = finality_grandpa::CatchUp { + round_number: 1, + precommits: vec![], + prevotes: vec![], + base_hash: header.hash(), + base_number: *header.number(), + }; + + let catch_up = voter::CommunicationIn::CatchUp( + unknown_catch_up.clone(), + voter::Callback::Blank, + ); + + Arc::new(Mutex::new(Some(catch_up))) + } + + #[test] + fn block_global_message_wait_completed_return_when_all_awaited() { + let msg_inner = test_catch_up(); + + let waiting_block_1 = BlockGlobalMessage:: { + inner: msg_inner.clone(), + target_number: 1, + }; + + let waiting_block_2 = BlockGlobalMessage:: { + inner: msg_inner, + target_number: 2, + }; + + // waiting_block_2 is still waiting for block 2, thus this should return `None`. + assert!(waiting_block_1.wait_completed(1).is_none()); + + // Message only depended on block 1 and 2. Both have been imported, thus this should yield + // the message. + assert!(waiting_block_2.wait_completed(2).is_some()); + } + + #[test] + fn block_global_message_wait_completed_return_none_on_block_number_missmatch() { + let msg_inner = test_catch_up(); + + let waiting_block_1 = BlockGlobalMessage:: { + inner: msg_inner.clone(), + target_number: 1, + }; + + let waiting_block_2 = BlockGlobalMessage:: { + inner: msg_inner, + target_number: 2, + }; + + // Calling wait_completed with wrong block number should yield None. 
+ assert!(waiting_block_1.wait_completed(1234).is_none()); + + // All blocks, that the message depended on, have been imported. Still, given the above + // block number mismatch this should return None. + assert!(waiting_block_2.wait_completed(2).is_none()); + } + + #[test] + fn metrics_cleans_up_after_itself() { + let r = Registry::new(); + + let mut m1 = Metrics::register(&r).unwrap(); + let m2 = m1.clone(); + + // Add a new message to the 'queue' of m1. + m1.waiting_messages_inc(); + + // m1 and m2 are synced through the shared atomic. + assert_eq!(1, m2.global_waiting_messages.get()); + + // Drop 'queue' m1. + drop(m1); + + // Make sure m1 cleaned up after itself, removing all messages that were left in its queue + // when dropped from the global metric. + assert_eq!(0, m2.global_waiting_messages.get()); + } } diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index d37bfb1cc29648c096e745ea57ac793e17f16529..b60886d6dea6cfbb7d4b9151f2a2498b81be4404 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-informant" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Substrate informant." edition = "2018" @@ -10,12 +10,15 @@ repository = "https://github.com/paritytech/substrate/" [dependencies] ansi_term = "0.12.1" -futures = "0.3.1" +futures = "0.3.4" log = "0.4.8" -parity-util-mem = { version = "0.5.2", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.6.0", default-features = false, features = ["primitive-types"] } wasm-timer = "0.2" -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sc-network = { version = "0.8.0-alpha.2", path = "../network" } -sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../service" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } +sc-service = { version = "0.8.0-alpha.5", default-features = false, path = "../service" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 53c9697868348cb9a73328d26a459601196cd240..42f498998362e6eb1a6d02f4af1fc98ce285030f 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -68,24 +68,24 @@ impl InformantDisplay { self.last_number = Some(best_number); let (status, target) = match (net_status.sync_state, net_status.best_seen_block) { - (SyncState::Idle, _) => ("Idle".into(), "".into()), - (SyncState::Downloading, None) => (format!("Syncing{}", speed), "".into()), - (SyncState::Downloading, Some(n)) => (format!("Syncing{}", speed), format!(", target=#{}", n)), + (SyncState::Idle, _) => ("💤 Idle".into(), "".into()), + (SyncState::Downloading, None) => (format!("⚙️ Preparing{}", speed), "".into()), + (SyncState::Downloading, Some(n)) => (format!("⚙️ Syncing{}", speed), format!(", target=#{}", n)), }; if self.format == OutputFormat::Coloured { info!( target: "substrate", - "{}{} ({} peers), best: #{} ({}), finalized #{} ({}), ⬇ {} ⬆ {}", + "{}{} ({} peers), best: #{} ({}), finalized #{} ({}), 
{} {}", Colour::White.bold().paint(&status), target, Colour::White.bold().paint(format!("{}", num_connected_peers)), - Colour::White.paint(format!("{}", best_number)), + Colour::White.bold().paint(format!("{}", best_number)), best_hash, - Colour::White.paint(format!("{}", finalized_number)), + Colour::White.bold().paint(format!("{}", finalized_number)), info.chain.finalized_hash, - TransferRateFormat(net_status.average_download_per_sec), - TransferRateFormat(net_status.average_upload_per_sec), + Colour::Green.paint(format!("⬇ {}", TransferRateFormat(net_status.average_download_per_sec))), + Colour::Red.paint(format!("⬆ {}", TransferRateFormat(net_status.average_upload_per_sec))), ); } else { info!( diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index d104a64a2dbd121a00d27dde8de80f61fee0c12d..66d5ed41fb5d418a2bae0952dc263cdee6ff8c84 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -16,6 +16,7 @@ //! Console informant. Prints sync progress and block events. Runs on the calling thread. +use ansi_term::Colour; use sc_client_api::BlockchainEvents; use futures::prelude::*; use log::{info, warn, trace}; @@ -79,10 +80,10 @@ pub fn build(service: &impl AbstractService, format: OutputFormat) -> impl futur match maybe_ancestor { Ok(ref ancestor) if ancestor.hash != *last_hash => info!( - "Reorg from #{},{} to #{},{}, common ancestor #{},{}", - last_num, last_hash, - n.header.number(), n.hash, - ancestor.number, ancestor.hash, + "♻️ Reorg on #{},{} to #{},{}, common ancestor #{},{}", + Colour::Red.bold().paint(format!("{}", last_num)), last_hash, + Colour::Green.bold().paint(format!("{}", n.header.number())), n.hash, + Colour::White.bold().paint(format!("{}", ancestor.number)), ancestor.hash, ), Ok(_) => {}, Err(e) => warn!("Error computing tree route: {}", e), @@ -94,7 +95,7 @@ pub fn build(service: &impl AbstractService, format: OutputFormat) -> impl futur last_best = Some((n.header.number().clone(), n.hash.clone())); } - info!(target: "substrate", "Imported #{} ({})", n.header.number(), n.hash); + info!(target: "substrate", "✨ Imported #{} ({})", Colour::White.bold().paint(format!("{}", n.header.number())), n.hash); future::ready(()) }); diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 247376bc46e17952cf6c1551becb4324cba90ba7..af9cdf81bfe9a472f12dd4284001b95934ce0bd7 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-keystore" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,8 +12,8 @@ documentation = "https://docs.rs/sc-keystore" [dependencies] derive_more = "0.99.2" -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-application-crypto = { version = "2.0.0-alpha.5", path = "../../primitives/application-crypto" } hex = "0.4.0" rand = "0.7.2" serde_json = "1.0.41" @@ -22,3 +22,6 @@ parking_lot = "0.10.0" [dev-dependencies] tempfile = "3.1.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index 087ddf326de0141cc011cf4b046779193b4ba2a1..f8bc93097113b300bc85373918021da742700221 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -17,10 +17,11 @@ //! 
Keystore (and session key management) for ed25519 based chains like Polkadot. #![warn(missing_docs)] - -use std::{collections::HashMap, path::PathBuf, fs::{self, File}, io::{self, Write}, sync::Arc}; +use std::{collections::{HashMap, HashSet}, path::PathBuf, fs::{self, File}, io::{self, Write}, sync::Arc}; use sp_core::{ - crypto::{KeyTypeId, Pair as PairT, Public, IsWrappedBy, Protected}, traits::BareCryptoStore, + crypto::{IsWrappedBy, CryptoTypePublicPair, KeyTypeId, Pair as PairT, Protected, Public}, + traits::{BareCryptoStore, BareCryptoStoreError as TraitError}, + Encode, }; use sp_application_crypto::{AppKey, AppPublic, AppPair, ed25519, sr25519}; use parking_lot::RwLock; @@ -44,6 +45,12 @@ pub enum Error { /// Invalid seed #[display(fmt="Invalid seed")] InvalidSeed, + /// Public key type is not supported + #[display(fmt="Key crypto type is not supported")] + KeyNotSupported(KeyTypeId), + /// Pair not found for public key and KeyTypeId + #[display(fmt="Pair not found for {} public key", "_0")] + PairNotFound(String), /// Keystore unavailable #[display(fmt="Keystore unavailable")] Unavailable, @@ -52,6 +59,21 @@ pub enum Error { /// Keystore Result pub type Result = std::result::Result; +impl From for TraitError { + fn from(error: Error) -> Self { + match error { + Error::KeyNotSupported(id) => TraitError::KeyNotSupported(id), + Error::PairNotFound(e) => TraitError::PairNotFound(e), + Error::InvalidSeed | Error::InvalidPhrase | Error::InvalidPassword => { + TraitError::ValidationError(error.to_string()) + }, + Error::Unavailable => TraitError::Unavailable, + Error::Io(e) => TraitError::Other(e.to_string()), + Error::Json(e) => TraitError::Other(e.to_string()), + } + } +} + impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { @@ -220,16 +242,35 @@ impl Store { self.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID).map(Into::into) } - /// Get public keys of all stored keys that match the given key type. - pub fn public_keys_by_type(&self, key_type: KeyTypeId) -> Result> { - let mut public_keys: Vec = self.additional.keys() - .filter_map(|(ty, public)| { - if *ty == key_type { - Some(TPublic::from_slice(public)) - } else { - None - } + /// Get public keys of all stored keys that match the key type. + /// + /// This will just use the type of the public key (a list of which to be returned) in order + /// to determine the key type. Unless you use a specialized application-type public key, then + /// this only give you keys registered under generic cryptography, and will not return keys + /// registered under the application type. + pub fn public_keys(&self) -> Result> { + self.raw_public_keys(Public::ID) + .map(|v| { + v.into_iter() + .map(|k| Public::from_slice(k.as_slice())) + .collect() }) + } + + /// Returns the file path for the given public key and key type. 
+ fn key_file_path(&self, public: &[u8], key_type: KeyTypeId) -> Option { + let mut buf = self.path.as_ref()?.clone(); + let key_type = hex::encode(key_type.0); + let key = hex::encode(public); + buf.push(key_type + key.as_str()); + Some(buf) + } + + /// Returns a list of raw public keys filtered by `KeyTypeId` + fn raw_public_keys(&self, id: KeyTypeId) -> Result>> { + let mut public_keys: Vec> = self.additional.keys() + .into_iter() + .filter_map(|k| if k.0 == id { Some(k.1.clone()) } else { None }) .collect(); if let Some(path) = &self.path { @@ -241,8 +282,10 @@ impl Store { if let Some(name) = path.file_name().and_then(|n| n.to_str()) { match hex::decode(name) { Ok(ref hex) if hex.len() > 4 => { - if &hex[0..4] != &key_type.0 { continue } - let public = TPublic::from_slice(&hex[4..]); + if &hex[0..4] != &id.0 { + continue; + } + let public = hex[4..].to_vec(); public_keys.push(public); } _ => continue, @@ -253,71 +296,104 @@ impl Store { Ok(public_keys) } +} - /// Get public keys of all stored keys that match the key type. - /// - /// This will just use the type of the public key (a list of which to be returned) in order - /// to determine the key type. Unless you use a specialized application-type public key, then - /// this only give you keys registered under generic cryptography, and will not return keys - /// registered under the application type. - pub fn public_keys(&self) -> Result> { - self.public_keys_by_type::(Public::ID) - .map(|v| v.into_iter().map(Into::into).collect()) +impl BareCryptoStore for Store { + fn keys( + &self, + id: KeyTypeId + ) -> std::result::Result, TraitError> { + let raw_keys = self.raw_public_keys(id)?; + Ok(raw_keys.into_iter() + .fold(Vec::new(), |mut v, k| { + v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); + v + })) + } + + fn supported_keys( + &self, + id: KeyTypeId, + keys: Vec + ) -> std::result::Result, TraitError> { + let all_keys = self.keys(id)?.into_iter().collect::>(); + Ok(keys.into_iter() + .filter(|key| all_keys.contains(key)) + .collect::>()) } - /// Returns the file path for the given public key and key type. 
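With the keystore now implementing the extended `BareCryptoStore` trait, consumers enumerate keys as `CryptoTypePublicPair`s and sign through `sign_with` (added just below) instead of extracting a concrete key pair. A hedged usage sketch, assuming an already-opened `store: Store` and using the `SR25519` test key type id:

    use sp_core::{testing::SR25519, traits::BareCryptoStore};

    // List every (crypto type, public key) pair stored under the SR25519 key type id.
    let keys = store.keys(SR25519)?;

    if let Some(key) = keys.first() {
        // The returned signature is the SCALE-encoded signature of the matching pair.
        let signature: Vec<u8> = store.sign_with(SR25519, key, b"hello world")?;
    }
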
- fn key_file_path(&self, public: &[u8], key_type: KeyTypeId) -> Option { - let mut buf = self.path.as_ref()?.clone(); - let key_type = hex::encode(key_type.0); - let key = hex::encode(public); - buf.push(key_type + key.as_str()); - Some(buf) + fn sign_with( + &self, + id: KeyTypeId, + key: &CryptoTypePublicPair, + msg: &[u8], + ) -> std::result::Result, TraitError> { + match key.0 { + ed25519::CRYPTO_ID => { + let pub_key = ed25519::Public::from_slice(key.1.as_slice()); + let key_pair: ed25519::Pair = self + .key_pair_by_type::(&pub_key, id) + .map_err(|e| TraitError::from(e))?; + Ok(key_pair.sign(msg).encode()) + } + sr25519::CRYPTO_ID => { + let pub_key = sr25519::Public::from_slice(key.1.as_slice()); + let key_pair: sr25519::Pair = self + .key_pair_by_type::(&pub_key, id) + .map_err(|e| TraitError::from(e))?; + Ok(key_pair.sign(msg).encode()) + } + _ => Err(TraitError::KeyNotSupported(id)) + } } -} -impl BareCryptoStore for Store { fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.public_keys_by_type::(key_type).unwrap_or_default() + self.raw_public_keys(key_type) + .map(|v| { + v.into_iter() + .map(|k| sr25519::Public::from_slice(k.as_slice())) + .collect() + }) + .unwrap_or_default() } fn sr25519_generate_new( &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> std::result::Result { + ) -> std::result::Result { let pair = match seed { Some(seed) => self.insert_ephemeral_from_seed_by_type::(seed, id), None => self.generate_by_type::(id), - }.map_err(|e| e.to_string())?; + }.map_err(|e| -> TraitError { e.into() })?; Ok(pair.public()) } - fn sr25519_key_pair(&self, id: KeyTypeId, pub_key: &sr25519::Public) -> Option { - self.key_pair_by_type::(pub_key, id).ok() - } - fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.public_keys_by_type::(key_type).unwrap_or_default() + self.raw_public_keys(key_type) + .map(|v| { + v.into_iter() + .map(|k| ed25519::Public::from_slice(k.as_slice())) + .collect() + }) + .unwrap_or_default() } fn ed25519_generate_new( &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> std::result::Result { + ) -> std::result::Result { let pair = match seed { Some(seed) => self.insert_ephemeral_from_seed_by_type::(seed, id), None => self.generate_by_type::(id), - }.map_err(|e| e.to_string())?; + }.map_err(|e| -> TraitError { e.into() })?; Ok(pair.public()) } - fn ed25519_key_pair(&self, id: KeyTypeId, pub_key: &ed25519::Public) -> Option { - self.key_pair_by_type::(pub_key, id).ok() - } - fn insert_unknown(&mut self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> std::result::Result<(), ()> { @@ -337,7 +413,7 @@ impl BareCryptoStore for Store { mod tests { use super::*; use tempfile::TempDir; - use sp_core::{testing::{SR25519}, crypto::{Ss58Codec}}; + use sp_core::{testing::SR25519, crypto::Ss58Codec}; #[test] fn basic_store() { @@ -451,7 +527,7 @@ mod tests { fs::write(file_name, "test").expect("Invalid file is written"); assert!( - store.read().public_keys_by_type::(SR25519).unwrap().is_empty(), + store.read().sr25519_public_keys(SR25519).is_empty(), ); } } diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 955911f1f07c1777efc83332cf638b721b396f24..c2887ed71925facd68b91fc2f08b2424181680d1 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Gossiping for the Substrate network protocol" name = "sc-network-gossip" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" license = "GPL-3.0" authors = ["Parity Technologies "] 
edition = "2018" @@ -11,11 +11,18 @@ documentation = "https://docs.rs/sc-network-gossip" [dependencies] -futures = "0.3.1" +futures = "0.3.4" futures-timer = "3.0.1" -libp2p = { version = "0.16.2", default-features = false, features = ["libp2p-websocket"] } +libp2p = { version = "0.17.0", default-features = false, features = ["websocket"] } log = "0.4.8" lru = "0.4.3" -sc-network = { version = "0.8.0-alpha.2", path = "../network" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils" } wasm-timer = "0.2" + +[dev-dependencies] +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index c06cb6268cc8063f56c3fe7dd4b3ed12b5f65c94..6a00b3d5a181ab812aea45007c42096535946a0f 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -17,13 +17,13 @@ use crate::{Network, Validator}; use crate::state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL}; -use sc_network::message::generic::ConsensusMessage; use sc_network::{Event, ReputationChange}; -use futures::{prelude::*, channel::mpsc}; +use futures::prelude::*; use libp2p::PeerId; use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; use std::{borrow::Cow, pin::Pin, sync::Arc, task::{Context, Poll}}; +use sp_utils::mpsc::TracingUnboundedReceiver; /// Wraps around an implementation of the `Network` crate and provides gossiping capabilities on /// top of it. @@ -77,12 +77,7 @@ impl GossipEngine { topic: B::Hash, message: Vec, ) { - let message = ConsensusMessage { - engine_id: self.engine_id, - data: message, - }; - - self.state_machine.register_message(topic, message); + self.state_machine.register_message(topic, self.engine_id, message); } /// Broadcast all messages with given topic. @@ -92,7 +87,7 @@ impl GossipEngine { /// Get data of valid, incoming messages for a topic (but might have expired meanwhile). pub fn messages_for(&mut self, topic: B::Hash) - -> mpsc::UnboundedReceiver + -> TracingUnboundedReceiver { self.state_machine.messages_for(self.engine_id, topic) } @@ -114,22 +109,14 @@ impl GossipEngine { message: Vec, force: bool, ) { - let message = ConsensusMessage { - engine_id: self.engine_id, - data: message, - }; - - self.state_machine.multicast(&mut *self.network, topic, message, force) + self.state_machine.multicast(&mut *self.network, topic, self.engine_id, message, force) } /// Send addressed message to the given peers. The message is not kept or multicast /// later on. 
pub fn send_message(&mut self, who: Vec, data: Vec) { for who in &who { - self.state_machine.send_message(&mut *self.network, who, ConsensusMessage { - engine_id: self.engine_id, - data: data.clone(), - }); + self.state_machine.send_message(&mut *self.network, who, self.engine_id, data.clone()); } } @@ -148,33 +135,38 @@ impl Future for GossipEngine { fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = &mut *self; - while let Poll::Ready(Some(event)) = this.network_event_stream.poll_next_unpin(cx) { - match event { - Event::NotificationStreamOpened { remote, engine_id: msg_engine_id, roles } => { - if msg_engine_id != this.engine_id { - continue; + loop { + match this.network_event_stream.poll_next_unpin(cx) { + Poll::Ready(Some(event)) => match event { + Event::NotificationStreamOpened { remote, engine_id: msg_engine_id, role } => { + if msg_engine_id != this.engine_id { + continue; + } + this.state_machine.new_peer(&mut *this.network, remote, role); } - this.state_machine.new_peer(&mut *this.network, remote, roles); + Event::NotificationStreamClosed { remote, engine_id: msg_engine_id } => { + if msg_engine_id != this.engine_id { + continue; + } + this.state_machine.peer_disconnected(&mut *this.network, remote); + }, + Event::NotificationsReceived { remote, messages } => { + let engine_id = this.engine_id.clone(); + this.state_machine.on_incoming( + &mut *this.network, + remote, + messages.into_iter() + .filter_map(|(engine, data)| if engine == engine_id { + Some((engine, data.to_vec())) + } else { None }) + .collect() + ); + }, + Event::Dht(_) => {} } - Event::NotificationStreamClosed { remote, engine_id: msg_engine_id } => { - if msg_engine_id != this.engine_id { - continue; - } - this.state_machine.peer_disconnected(&mut *this.network, remote); - }, - Event::NotificationsReceived { remote, messages } => { - let engine_id = this.engine_id.clone(); - this.state_machine.on_incoming( - &mut *this.network, - remote, - messages.into_iter() - .filter_map(|(engine, data)| if engine == engine_id { - Some(ConsensusMessage { engine_id: engine, data: data.to_vec() }) - } else { None }) - .collect() - ); - }, - Event::Dht(_) => {} + // The network event stream closed. Do the same for [`GossipValidator`]. + Poll::Ready(None) => return Poll::Ready(()), + Poll::Pending => break, } } @@ -186,3 +178,77 @@ impl Future for GossipEngine { Poll::Pending } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ValidationResult, ValidatorContext}; + use substrate_test_runtime_client::runtime::Block; + + struct TestNetwork {} + + impl Network for Arc { + fn event_stream(&self) -> Pin + Send>> { + let (_tx, rx) = futures::channel::mpsc::channel(0); + + // Return rx and drop tx. Thus the given channel will yield `Poll::Ready(None)` on first + // poll. + Box::pin(rx) + } + + fn report_peer(&self, _: PeerId, _: ReputationChange) { + unimplemented!(); + } + + fn disconnect_peer(&self, _: PeerId) { + unimplemented!(); + } + + fn write_notification(&self, _: PeerId, _: ConsensusEngineId, _: Vec) { + unimplemented!(); + } + + fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, [u8]>) {} + + fn announce(&self, _: B::Hash, _: Vec) { + unimplemented!(); + } + } + + struct TestValidator {} + + impl Validator for TestValidator { + fn validate( + &self, + _: &mut dyn ValidatorContext, + _: &PeerId, + _: &[u8] + ) -> ValidationResult { + unimplemented!(); + } + } + + /// Regression test for the case where the `GossipEngine.network_event_stream` closes. 
One + /// should not ignore a `Poll::Ready(None)` as `poll_next_unpin` will panic on subsequent calls. + /// + /// See https://github.com/paritytech/substrate/issues/5000 for details. + #[test] + fn returns_when_network_event_stream_closes() { + let mut gossip_engine = GossipEngine::::new( + Arc::new(TestNetwork{}), + [1, 2, 3, 4], + "my_protocol".as_bytes(), + Arc::new(TestValidator{}), + ); + + futures::executor::block_on(futures::future::poll_fn(move |ctx| { + if let Poll::Pending = gossip_engine.poll_unpin(ctx) { + panic!( + "Expected gossip engine to finish on first poll, given that \ + `GossipEngine.network_event_stream` closes right away." + ) + } + Poll::Ready(()) + })) + } +} diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 675f0213651d631b012535dd0e2cdfad1cb1b565..c846534488bf5a94760688089b8ecb1917cace62 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -20,14 +20,13 @@ use std::collections::{HashMap, HashSet, hash_map::Entry}; use std::sync::Arc; use std::iter; use std::time; -use log::{trace, debug}; -use futures::channel::mpsc; +use log::trace; use lru::LruCache; use libp2p::PeerId; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; use sp_runtime::ConsensusEngineId; -pub use sc_network::message::generic::{Message, ConsensusMessage}; -use sc_network::config::Roles; +use sc_network::ObservedRole; use wasm_timer::Instant; // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 @@ -52,7 +51,7 @@ mod rep { struct PeerConsensus { known_messages: HashSet, - roles: Roles, + role: ObservedRole, } /// Topic stream message with sender. @@ -67,7 +66,8 @@ pub struct TopicNotification { struct MessageEntry { message_hash: B::Hash, topic: B::Hash, - message: ConsensusMessage, + engine_id: ConsensusEngineId, + message: Vec, sender: Option, } @@ -89,7 +89,8 @@ impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { self.gossip.multicast( self.network, topic, - ConsensusMessage{ data: message, engine_id: self.engine_id.clone() }, + self.engine_id.clone(), + message, force, ); } @@ -113,11 +114,10 @@ fn propagate<'a, B: BlockT, I>( validators: &HashMap>>, ) // (msg_hash, topic, message) - where I: Clone + IntoIterator, + where I: Clone + IntoIterator)>, { let mut check_fns = HashMap::new(); - let mut message_allowed = move |who: &PeerId, intent: MessageIntent, topic: &B::Hash, message: &ConsensusMessage| { - let engine_id = message.engine_id; + let mut message_allowed = move |who: &PeerId, intent: MessageIntent, topic: &B::Hash, engine_id: ConsensusEngineId, message: &Vec| { let check_fn = match check_fns.entry(engine_id) { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(vacant) => match validators.get(&engine_id) { @@ -126,11 +126,11 @@ fn propagate<'a, B: BlockT, I>( } }; - (check_fn)(who, intent, topic, &message.data) + (check_fn)(who, intent, topic, &message) }; for (id, ref mut peer) in peers.iter_mut() { - for (message_hash, topic, message) in messages.clone() { + for (message_hash, topic, engine_id, message) in messages.clone() { let intent = match intent { MessageIntent::Broadcast { .. 
} => if peer.known_messages.contains(&message_hash) { @@ -149,14 +149,14 @@ fn propagate<'a, B: BlockT, I>( other => other, }; - if !message_allowed(id, intent, &topic, &message) { + if !message_allowed(id, intent, &topic, engine_id, &message) { continue; } peer.known_messages.insert(message_hash.clone()); trace!(target: "gossip", "Propagating to {}: {:?}", id, message); - network.write_notification(id.clone(), message.engine_id, message.data.clone()); + network.write_notification(id.clone(), engine_id, message.clone()); } } } @@ -164,7 +164,7 @@ fn propagate<'a, B: BlockT, I>( /// Consensus network protocol handler. Manages statements and candidate requests. pub struct ConsensusGossip { peers: HashMap>, - live_message_sinks: HashMap<(ConsensusEngineId, B::Hash), Vec>>, + live_message_sinks: HashMap<(ConsensusEngineId, B::Hash), Vec>>, messages: Vec>, known_messages: LruCache, validators: HashMap>>, @@ -192,10 +192,10 @@ impl ConsensusGossip { validator: Arc> ) { self.register_validator_internal(engine_id, validator.clone()); - let peers: Vec<_> = self.peers.iter().map(|(id, peer)| (id.clone(), peer.roles)).collect(); - for (id, roles) in peers { + let peers: Vec<_> = self.peers.iter().map(|(id, peer)| (id.clone(), peer.role.clone())).collect(); + for (id, role) in peers { let mut context = NetworkContext { gossip: self, network, engine_id: engine_id.clone() }; - validator.new_peer(&mut context, &id, roles); + validator.new_peer(&mut context, &id, role); } } @@ -204,20 +204,20 @@ impl ConsensusGossip { } /// Handle new connected peer. - pub fn new_peer(&mut self, network: &mut dyn Network, who: PeerId, roles: Roles) { + pub fn new_peer(&mut self, network: &mut dyn Network, who: PeerId, role: ObservedRole) { // light nodes are not valid targets for consensus gossip messages - if !roles.is_full() { + if role.is_light() { return; } - trace!(target:"gossip", "Registering {:?} {}", roles, who); + trace!(target:"gossip", "Registering {:?} {}", role, who); self.peers.insert(who.clone(), PeerConsensus { known_messages: HashSet::new(), - roles, + role: role.clone(), }); for (engine_id, v) in self.validators.clone() { let mut context = NetworkContext { gossip: self, network, engine_id: engine_id.clone() }; - v.new_peer(&mut context, &who, roles); + v.new_peer(&mut context, &who, role.clone()); } } @@ -225,13 +225,15 @@ impl ConsensusGossip { &mut self, message_hash: B::Hash, topic: B::Hash, - message: ConsensusMessage, + engine_id: ConsensusEngineId, + message: Vec, sender: Option, ) { if self.known_messages.put(message_hash.clone(), ()).is_none() { self.messages.push(MessageEntry { message_hash, topic, + engine_id, message, sender, }); @@ -246,10 +248,11 @@ impl ConsensusGossip { pub fn register_message( &mut self, topic: B::Hash, - message: ConsensusMessage, + engine_id: ConsensusEngineId, + message: Vec, ) { - let message_hash = HashFor::::hash(&message.data[..]); - self.register_message_hashed(message_hash, topic, message, None); + let message_hash = HashFor::::hash(&message[..]); + self.register_message_hashed(message_hash, topic, engine_id, message, None); } /// Call when a peer has been disconnected to stop tracking gossip status. @@ -273,7 +276,7 @@ impl ConsensusGossip { /// Rebroadcast all messages to all peers. 
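The propagate path now carries the engine id and raw payload separately, but the per-peer bookkeeping is unchanged: a message is written to a given peer at most once, and the peer's known_messages set is updated as it is sent. A simplified standalone sketch of that filter, which leaves out the validator's message_allowed check and the MessageIntent handling:

use std::collections::{HashMap, HashSet};

// Simplified placeholder types; the real code uses libp2p PeerIds and block hashes.
type PeerId = u32;
type MessageHash = u64;

struct Peer {
    known_messages: HashSet<MessageHash>,
}

fn propagate(
    peers: &mut HashMap<PeerId, Peer>,
    messages: &[(MessageHash, Vec<u8>)],
    mut send: impl FnMut(PeerId, &[u8]),
) {
    for (id, peer) in peers.iter_mut() {
        for (hash, data) in messages {
            // `insert` returns false if the peer already knew this message, in
            // which case it is skipped; otherwise it is recorded and sent out.
            if !peer.known_messages.insert(*hash) {
                continue;
            }
            send(*id, data);
        }
    }
}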
fn rebroadcast(&mut self, network: &mut dyn Network) { let messages = self.messages.iter() - .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); + .map(|entry| (&entry.message_hash, &entry.topic, entry.engine_id, &entry.message)); propagate(network, messages, MessageIntent::PeriodicRebroadcast, &mut self.peers, &self.validators); } @@ -281,7 +284,9 @@ impl ConsensusGossip { pub fn broadcast_topic(&mut self, network: &mut dyn Network, topic: B::Hash, force: bool) { let messages = self.messages.iter() .filter_map(|entry| - if entry.topic == topic { Some((&entry.message_hash, &entry.topic, &entry.message)) } else { None } + if entry.topic == topic { + Some((&entry.message_hash, &entry.topic, entry.engine_id, &entry.message)) + } else { None } ); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; propagate(network, messages, intent, &mut self.peers, &self.validators); @@ -301,7 +306,7 @@ impl ConsensusGossip { let mut check_fns = HashMap::new(); let mut message_expired = move |entry: &MessageEntry| { - let engine_id = entry.message.engine_id; + let engine_id = entry.engine_id; let check_fn = match check_fns.entry(engine_id) { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(vacant) => match validators.get(&engine_id) { @@ -310,7 +315,7 @@ impl ConsensusGossip { } }; - (check_fn)(entry.topic, &entry.message.data) + (check_fn)(entry.topic, &entry.message) }; self.messages.retain(|entry| !message_expired(entry)); @@ -328,14 +333,14 @@ impl ConsensusGossip { /// Get data of valid, incoming messages for a topic (but might have expired meanwhile) pub fn messages_for(&mut self, engine_id: ConsensusEngineId, topic: B::Hash) - -> mpsc::UnboundedReceiver + -> TracingUnboundedReceiver { - let (tx, rx) = mpsc::unbounded(); + let (tx, rx) = tracing_unbounded("mpsc_gossip_messages_for"); for entry in self.messages.iter_mut() - .filter(|e| e.topic == topic && e.message.engine_id == engine_id) + .filter(|e| e.topic == topic && e.engine_id == engine_id) { tx.unbounded_send(TopicNotification { - message: entry.message.data.clone(), + message: entry.message.clone(), sender: entry.sender.clone(), }) .expect("receiver known to be live; qed"); @@ -346,22 +351,21 @@ impl ConsensusGossip { rx } - /// Handle an incoming ConsensusMessage for topic by who via protocol. Discard message if topic - /// already known, the message is old, its source peers isn't a registered peer or the connection - /// to them is broken. Return `Some(topic, message)` if it was added to the internal queue, `None` - /// in all other cases. + /// Handle an incoming message for topic by who via protocol. Discard message if topic already + /// known, the message is old, its source peers isn't a registered peer or the connection to + /// them is broken. 
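on_incoming now receives plain (engine id, payload) pairs; each payload is hashed, duplicates are dropped against a bounded cache of already-seen hashes, and only messages the registered validator accepts are kept for later propagation. A compressed standalone sketch of that flow, with a toy hash function and a HashSet standing in for the LRU cache:

use std::collections::HashSet;

type EngineId = [u8; 4];
type MessageHash = u64;
type Topic = u64;

// Toy stand-in for the block-hash function used by the real code.
fn hash(data: &[u8]) -> MessageHash {
    data.iter().fold(0u64, |acc, b| acc.wrapping_mul(31).wrapping_add(*b as u64))
}

enum ValidationResult {
    ProcessAndKeep(Topic),
    Discard,
}

fn on_incoming(
    known: &mut HashSet<MessageHash>,
    messages: Vec<(EngineId, Vec<u8>)>,
    validate: impl Fn(EngineId, &[u8]) -> ValidationResult,
) -> Vec<(Topic, EngineId, Vec<u8>)> {
    let mut kept = Vec::new();
    for (engine_id, data) in messages {
        let message_hash = hash(&data);
        // Drop duplicates up front; the real code uses a bounded LRU cache here.
        if !known.insert(message_hash) {
            continue;
        }
        match validate(engine_id, &data) {
            ValidationResult::ProcessAndKeep(topic) => kept.push((topic, engine_id, data)),
            ValidationResult::Discard => {}
        }
    }
    kept
}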
pub fn on_incoming( &mut self, network: &mut dyn Network, who: PeerId, - messages: Vec, + messages: Vec<(ConsensusEngineId, Vec)>, ) { if !messages.is_empty() { trace!(target: "gossip", "Received {} messages from peer {}", messages.len(), who); } - for message in messages { - let message_hash = HashFor::::hash(&message.data[..]); + for (engine_id, message) in messages { + let message_hash = HashFor::::hash(&message[..]); if self.known_messages.contains(&message_hash) { trace!(target:"gossip", "Ignored already known message from {}", who); @@ -369,13 +373,12 @@ impl ConsensusGossip { continue; } - let engine_id = message.engine_id; // validate the message let validation = self.validators.get(&engine_id) .cloned() .map(|v| { let mut context = NetworkContext { gossip: self, network, engine_id }; - v.validate(&mut context, &who, &message.data) + v.validate(&mut context, &who, &message) }); let validation_result = match validation { @@ -395,10 +398,10 @@ impl ConsensusGossip { if let Some(ref mut peer) = self.peers.get_mut(&who) { peer.known_messages.insert(message_hash); if let Entry::Occupied(mut entry) = self.live_message_sinks.entry((engine_id, topic)) { - debug!(target: "gossip", "Pushing consensus message to sinks for {}.", topic); + trace!(target: "gossip", "Pushing consensus message to sinks for {}.", topic); entry.get_mut().retain(|sink| { if let Err(e) = sink.unbounded_send(TopicNotification { - message: message.data.clone(), + message: message.clone(), sender: Some(who.clone()) }) { trace!(target: "gossip", "Error broadcasting message notification: {:?}", e); @@ -410,14 +413,14 @@ impl ConsensusGossip { } } if keep { - self.register_message_hashed(message_hash, topic, message, Some(who.clone())); + self.register_message_hashed(message_hash, topic, engine_id, message, Some(who.clone())); } } else { trace!(target:"gossip", "Ignored statement from unregistered peer {}", who); network.report_peer(who.clone(), rep::UNREGISTERED_TOPIC); } } else { - trace!(target:"gossip", "Handled valid one hop message from peer {}", who); + trace!(target:"gossip", "Discard message from peer {}", who); } } } @@ -438,7 +441,7 @@ impl ConsensusGossip { }; if let Some(ref mut peer) = self.peers.get_mut(who) { - for entry in self.messages.iter().filter(|m| m.topic == topic && m.message.engine_id == engine_id) { + for entry in self.messages.iter().filter(|m| m.topic == topic && m.engine_id == engine_id) { let intent = if force { MessageIntent::ForcedBroadcast } else { @@ -449,14 +452,14 @@ impl ConsensusGossip { continue; } - if !message_allowed(who, intent, &entry.topic, &entry.message.data) { + if !message_allowed(who, intent, &entry.topic, &entry.message) { continue; } peer.known_messages.insert(entry.message_hash.clone()); trace!(target: "gossip", "Sending topic message to {}: {:?}", who, entry.message); - network.write_notification(who.clone(), engine_id, entry.message.data.clone()); + network.write_notification(who.clone(), engine_id, entry.message.clone()); } } } @@ -466,13 +469,14 @@ impl ConsensusGossip { &mut self, network: &mut dyn Network, topic: B::Hash, - message: ConsensusMessage, + engine_id: ConsensusEngineId, + message: Vec, force: bool, ) { - let message_hash = HashFor::::hash(&message.data); - self.register_message_hashed(message_hash, topic, message.clone(), None); + let message_hash = HashFor::::hash(&message); + self.register_message_hashed(message_hash, topic, engine_id, message.clone(), None); let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - 
propagate(network, iter::once((&message_hash, &topic, &message)), intent, &mut self.peers, &self.validators); + propagate(network, iter::once((&message_hash, &topic, engine_id, &message)), intent, &mut self.peers, &self.validators); } /// Send addressed message to a peer. The message is not kept or multicast @@ -481,19 +485,20 @@ impl ConsensusGossip { &mut self, network: &mut dyn Network, who: &PeerId, - message: ConsensusMessage, + engine_id: ConsensusEngineId, + message: Vec, ) { let peer = match self.peers.get_mut(who) { None => return, Some(peer) => peer, }; - let message_hash = HashFor::::hash(&message.data); + let message_hash = HashFor::::hash(&message); trace!(target: "gossip", "Sending direct to {}: {:?}", who, message); peer.known_messages.insert(message_hash); - network.write_notification(who.clone(), message.engine_id, message.data); + network.write_notification(who.clone(), engine_id, message); } } @@ -513,7 +518,8 @@ mod tests { $consensus.messages.push(MessageEntry { message_hash: $hash, topic: $topic, - message: ConsensusMessage { data: $m, engine_id: [0, 0, 0, 0]}, + engine_id: [0, 0, 0, 0], + message: $m, sender: None, }); } @@ -588,13 +594,14 @@ mod tests { let mut consensus = ConsensusGossip::::new(); consensus.register_validator_internal([0, 0, 0, 0], Arc::new(AllowAll)); - let message = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 0] }; + let engine_id = [0, 0, 0, 0]; + let message = vec![4, 5, 6]; let topic = HashFor::::hash(&[1,2,3]); - consensus.register_message(topic, message.clone()); + consensus.register_message(topic, engine_id, message.clone()); let mut stream = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); - assert_eq!(stream.next(), Some(TopicNotification { message: message.data, sender: None })); + assert_eq!(stream.next(), Some(TopicNotification { message: message, sender: None })); } #[test] @@ -602,11 +609,11 @@ mod tests { let mut consensus = ConsensusGossip::::new(); let topic = [1; 32].into(); - let msg_a = ConsensusMessage { data: vec![1, 2, 3], engine_id: [0, 0, 0, 0] }; - let msg_b = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 0] }; + let msg_a = vec![1, 2, 3]; + let msg_b = vec![4, 5, 6]; - consensus.register_message(topic, msg_a); - consensus.register_message(topic, msg_b); + consensus.register_message(topic, [0, 0, 0, 0], msg_a); + consensus.register_message(topic, [0, 0, 0, 0], msg_b); assert_eq!(consensus.messages.len(), 2); } @@ -616,17 +623,16 @@ mod tests { let mut consensus = ConsensusGossip::::new(); consensus.register_validator_internal([0, 0, 0, 0], Arc::new(AllowAll)); - let data = vec![4, 5, 6]; - let message = ConsensusMessage { data: data.clone(), engine_id: [0, 0, 0, 0] }; + let message = vec![4, 5, 6]; let topic = HashFor::::hash(&[1, 2, 3]); - consensus.register_message(topic, message.clone()); + consensus.register_message(topic, [0, 0, 0, 0], message.clone()); let mut stream1 = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); let mut stream2 = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); - assert_eq!(stream1.next(), Some(TopicNotification { message: data.clone(), sender: None })); - assert_eq!(stream2.next(), Some(TopicNotification { message: data, sender: None })); + assert_eq!(stream1.next(), Some(TopicNotification { message: message.clone(), sender: None })); + assert_eq!(stream2.next(), Some(TopicNotification { message, sender: None })); } #[test] @@ -635,11 +641,11 @@ mod tests { consensus.register_validator_internal([0, 0, 0, 0], 
Arc::new(AllowAll)); let topic = [1; 32].into(); - let msg_a = ConsensusMessage { data: vec![1, 2, 3], engine_id: [0, 0, 0, 0] }; - let msg_b = ConsensusMessage { data: vec![4, 5, 6], engine_id: [0, 0, 0, 1] }; + let msg_a = vec![1, 2, 3]; + let msg_b = vec![4, 5, 6]; - consensus.register_message(topic, msg_a); - consensus.register_message(topic, msg_b); + consensus.register_message(topic, [0, 0, 0, 0], msg_a); + consensus.register_message(topic, [0, 0, 0, 1], msg_b); let mut stream = block_on_stream(consensus.messages_for([0, 0, 0, 0], topic)); @@ -690,7 +696,7 @@ mod tests { let mut network = TestNetwork; let peer_id = PeerId::random(); - consensus.new_peer(&mut network, peer_id.clone(), Roles::FULL); + consensus.new_peer(&mut network, peer_id.clone(), ObservedRole::Full); assert!(consensus.peers.contains_key(&peer_id)); consensus.peer_disconnected(&mut network, peer_id.clone()); diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index 74b5307ee9cdcdaf296a10dee8c4881a0495e105..6b330d7b618c0e702e52cc330ed2bd77b3ffdbdd 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sc_network::{config::Roles, PeerId}; +use sc_network::{ObservedRole, PeerId}; use sp_runtime::traits::Block as BlockT; /// Validates consensus messages. pub trait Validator: Send + Sync { /// New peer is connected. - fn new_peer(&self, _context: &mut dyn ValidatorContext, _who: &PeerId, _roles: Roles) { + fn new_peer(&self, _context: &mut dyn ValidatorContext, _who: &PeerId, _role: ObservedRole) { } /// New connection is dropped. diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index d99dce2fd2cdd6c78411e2cf412e4227a71aeafa..5b4813e80a5fd57228dead65954f477eb6140f47 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate network protocol" name = "sc-network" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" @@ -16,17 +16,16 @@ prost-build = "0.6.1" [dependencies] bitflags = "1.2.0" bytes = "0.5.0" -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } derive_more = "0.99.2" either = "1.5.3" erased-serde = "0.3.9" fnv = "1.0.6" -fork-tree = { version = "2.0.0-alpha.2", path = "../../utils/fork-tree" } -futures = "0.3.1" +fork-tree = { version = "2.0.0-alpha.5", path = "../../utils/fork-tree" } +futures = "0.3.4" futures_codec = "0.3.3" futures-timer = "3.0.1" wasm-timer = "0.2" -libp2p = { version = "0.16.2", default-features = false, features = ["libp2p-websocket"] } linked-hash-map = "0.5.2" linked_hash_set = "0.1.3" log = "0.4.8" @@ -35,36 +34,43 @@ nohash-hasher = "0.2.0" parking_lot = "0.10.0" prost = "0.6.1" rand = "0.7.2" -rustc-hex = "2.0.1" -sc-block-builder = { version = "0.8.0-alpha.2", path = "../block-builder" } -sc-client = { version = "0.8.0-alpha.2", path = "../" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sc-peerset = { version = "2.0.0-alpha.2", path = "../peerset" } +hex = "0.4.0" +sc-block-builder = { version = "0.8.0-alpha.5", path = "../block-builder" } +sc-client = { version = "0.8.0-alpha.5", path = "../" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sc-peerset = { 
version = "2.0.0-alpha.5", path = "../peerset" } pin-project = "0.4.6" serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } slog_derive = "0.2.0" smallvec = "0.6.10" -sp-arithmetic = { version = "2.0.0-alpha.2", path = "../../primitives/arithmetic" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } -sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/babe" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-alpha.2", path = "../../utils/prometheus" } +sp-arithmetic = { version = "2.0.0-alpha.5", path = "../../primitives/arithmetic" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/common" } +sp-consensus-babe = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/babe" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-alpha.5", path = "../../utils/prometheus" } thiserror = "1" unsigned-varint = { version = "0.3.1", features = ["futures", "futures-codec"] } void = "1.0.2" zeroize = "1.0.0" +[dependencies.libp2p] +version = "0.17.0" +default-features = false +features = ["websocket", "kad", "mdns", "ping", "identify", "mplex", "yamux", "noise"] + [dev-dependencies] async-std = "1.5" assert_matches = "1.3" env_logger = "0.7.0" +libp2p = { version = "0.17.0", default-features = false, features = ["secio"] } quickcheck = "0.9.0" rand = "0.7.2" -sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../primitives/keyring" } sp-test-primitives = { version = "2.0.0-dev", path = "../../primitives/test-primitives" } substrate-test-runtime = { version = "2.0.0-dev", path = "../../test-utils/runtime" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } @@ -73,3 +79,6 @@ tempfile = "3.1.0" [features] default = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index 4e3dad8a13b93b7324c73386108aac2b05dbdb3c..99fd22dcbb765e4392f3fda12d931fd49e0fe44a 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -15,19 +15,19 @@ // along with Substrate. If not, see . 
use crate::{ - config::ProtocolId, - debug_info, discovery::DiscoveryBehaviour, discovery::DiscoveryOut, DiscoveryNetBehaviour, - Event, protocol::event::DhtEvent, ExHashT, + config::{ProtocolId, Role}, + debug_info, discovery::{DiscoveryBehaviour, DiscoveryOut}, + Event, ObservedRole, DhtEvent, ExHashT, }; -use crate::protocol::{self, light_client_handler, CustomMessageOutcome, Protocol}; +use crate::protocol::{self, light_client_handler, message::Roles, CustomMessageOutcome, Protocol}; use libp2p::NetworkBehaviour; use libp2p::core::{Multiaddr, PeerId, PublicKey}; use libp2p::kad::record; use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}; use log::debug; use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; -use sp_runtime::{traits::{Block as BlockT, NumberFor}, Justification}; -use std::{iter, task::Context, task::Poll}; +use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId, Justification}; +use std::{borrow::Cow, iter, task::Context, task::Poll}; use void; /// General behaviour of the network. Combines all protocols together. @@ -45,9 +45,14 @@ pub struct Behaviour { block_requests: protocol::BlockRequests, /// Light client request handling. light_client_handler: protocol::LightClientHandler, + /// Queue of events to produce for the outside. #[behaviour(ignore)] events: Vec>, + + /// Role of our local node, as originally passed from the configuration. + #[behaviour(ignore)] + role: Role, } /// Event generated by `Behaviour`. @@ -64,6 +69,7 @@ impl Behaviour { /// Builds a new `Behaviour`. pub async fn new( substrate: Protocol, + role: Role, user_agent: String, local_public_key: PublicKey, known_addresses: Vec<(PeerId, Multiaddr)>, @@ -85,7 +91,8 @@ impl Behaviour { ).await, block_requests, light_client_handler, - events: Vec::new() + events: Vec::new(), + role, } } @@ -118,6 +125,32 @@ impl Behaviour { self.debug_info.node(peer_id) } + /// Registers a new notifications protocol. + /// + /// After that, you can call `write_notifications`. + /// + /// Please call `event_stream` before registering a protocol, otherwise you may miss events + /// about the protocol that you have registered. + /// + /// You are very strongly encouraged to call this method very early on. Any connection open + /// will retain the protocols that were registered then, and not any new one. + pub fn register_notifications_protocol( + &mut self, + engine_id: ConsensusEngineId, + protocol_name: impl Into>, + ) { + let list = self.substrate.register_notifications_protocol(engine_id, protocol_name); + for (remote, roles) in list { + let role = reported_roles_to_observed_role(&self.role, remote, roles); + let ev = Event::NotificationStreamOpened { + remote: remote.clone(), + engine_id, + role, + }; + self.events.push(BehaviourOut::Event(ev)); + } + } + /// Returns a shared reference to the user protocol. pub fn user_protocol(&self) -> &Protocol { &self.substrate @@ -139,12 +172,27 @@ impl Behaviour { } /// Issue a light client request. 
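register_notifications_protocol now also synthesises a NotificationStreamOpened event for every peer that is already connected, so a protocol registered after startup still learns about existing substreams. A trimmed-down sketch of that replay; the types are simplified placeholders rather than the real Behaviour:

// Simplified placeholders for the real event and behaviour types.
#[derive(Debug)]
enum Event {
    NotificationStreamOpened { remote: u32, engine_id: [u8; 4] },
}

struct DemoBehaviour {
    connected_peers: Vec<u32>,
    events: Vec<Event>,
}

impl DemoBehaviour {
    // Registering a protocol late still emits an "opened" event for every peer
    // that is already connected, so consumers do not miss existing peers.
    fn register_notifications_protocol(&mut self, engine_id: [u8; 4]) {
        for remote in &self.connected_peers {
            self.events.push(Event::NotificationStreamOpened { remote: *remote, engine_id });
        }
    }
}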
- #[allow(unused)] pub fn light_client_request(&mut self, r: light_client_handler::Request) -> Result<(), light_client_handler::Error> { self.light_client_handler.request(r) } } +fn reported_roles_to_observed_role(local_role: &Role, remote: &PeerId, roles: Roles) -> ObservedRole { + if roles.is_authority() { + match local_role { + Role::Authority { sentry_nodes } + if sentry_nodes.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurSentry, + Role::Sentry { validators } + if validators.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurGuardedAuthority, + _ => ObservedRole::Authority + } + } else if roles.is_full() { + ObservedRole::Full + } else { + ObservedRole::Light + } +} + impl NetworkBehaviourEventProcess for Behaviour { fn inject_event(&mut self, event: void::Void) { @@ -162,14 +210,16 @@ Behaviour { self.events.push(BehaviourOut::JustificationImport(origin, hash, nb, justification)), CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => self.events.push(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)), - CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles } => + CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles } => { + let role = reported_roles_to_observed_role(&self.role, &remote, roles); for engine_id in protocols { self.events.push(BehaviourOut::Event(Event::NotificationStreamOpened { remote: remote.clone(), engine_id, - roles, + role: role.clone(), })); - }, + } + }, CustomMessageOutcome::NotificationStreamClosed { remote, protocols } => for engine_id in protocols { self.events.push(BehaviourOut::Event(Event::NotificationStreamClosed { @@ -181,6 +231,9 @@ Behaviour { let ev = Event::NotificationsReceived { remote, messages }; self.events.push(BehaviourOut::Event(ev)); }, + CustomMessageOutcome::PeerNewBest(peer_id, number) => { + self.light_client_handler.update_best_block(&peer_id, number); + } CustomMessageOutcome::None => {} } } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 3f73d761ce879e7b693492a3a04e2f2087f4584b..01acbe68755be6e27b6da4a15ab16dd162828b7d 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -20,7 +20,7 @@ //! See the documentation of [`Params`]. 
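reported_roles_to_observed_role combines the remote's advertised roles with our own configuration: an authority that is one of our configured sentry nodes (or, when we are a sentry, one of the validators we guard) gets a dedicated tag, and everything else maps to plain Authority, Full or Light. A standalone sketch of that mapping with simplified placeholder types:

// Simplified stand-ins for config::Role and ObservedRole; peers are plain integers here.
#[derive(Clone)]
enum LocalRole {
    Full,
    Light,
    Sentry { validators: Vec<u32> },
    Authority { sentry_nodes: Vec<u32> },
}

#[derive(Debug, PartialEq)]
enum ObservedRole {
    OurSentry,
    OurGuardedAuthority,
    Authority,
    Full,
    Light,
}

fn observed_role(
    local: &LocalRole,
    remote: u32,
    remote_is_authority: bool,
    remote_is_full: bool,
) -> ObservedRole {
    if remote_is_authority {
        match local {
            LocalRole::Authority { sentry_nodes } if sentry_nodes.contains(&remote) =>
                ObservedRole::OurSentry,
            LocalRole::Sentry { validators } if validators.contains(&remote) =>
                ObservedRole::OurGuardedAuthority,
            _ => ObservedRole::Authority,
        }
    } else if remote_is_full {
        ObservedRole::Full
    } else {
        ObservedRole::Light
    }
}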
pub use crate::chain::{Client, FinalityProofProvider}; -pub use crate::on_demand_layer::OnDemand; +pub use crate::on_demand_layer::{AlwaysBadChecker, OnDemand}; pub use crate::service::{TransactionPool, EmptyTransactionPool}; pub use libp2p::{identity, core::PublicKey, wasm_ext::ExtTransport, build_multiaddr}; @@ -31,23 +31,28 @@ pub use crate::protocol::ProtocolConfig; use crate::service::ExHashT; -use bitflags::bitflags; -use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; -use sp_runtime::traits::{Block as BlockT}; -use libp2p::identity::{Keypair, ed25519}; -use libp2p::wasm_ext; -use libp2p::{PeerId, Multiaddr, multiaddr}; use core::{fmt, iter}; -use std::{future::Future, pin::Pin}; -use std::{error::Error, fs, io::{self, Write}, net::Ipv4Addr, path::{Path, PathBuf}, sync::Arc}; -use zeroize::Zeroize; +use libp2p::identity::{ed25519, Keypair}; +use libp2p::wasm_ext; +use libp2p::{multiaddr, Multiaddr, PeerId}; use prometheus_endpoint::Registry; - +use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; +use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; +use std::{borrow::Cow, convert::TryFrom, future::Future, pin::Pin, str::FromStr}; +use std::{ + error::Error, + fs, + io::{self, Write}, + net::Ipv4Addr, + path::{Path, PathBuf}, + sync::Arc, +}; +use zeroize::Zeroize; /// Network initialization parameters. pub struct Params { - /// Assigned roles for our node (full, light, ...). - pub roles: Roles, + /// Assigned role for our node (full, light, ...). + pub role: Role, /// How to spawn background tasks. If you pass `None`, then a threads pool will be used by /// default. @@ -97,54 +102,48 @@ pub struct Params { pub metrics_registry: Option, } -bitflags! { - /// Bitmask of the roles that a node fulfills. - pub struct Roles: u8 { - /// No network. - const NONE = 0b00000000; - /// Full node, does not participate in consensus. - const FULL = 0b00000001; - /// Light client node. - const LIGHT = 0b00000010; - /// Act as an authority - const AUTHORITY = 0b00000100; +/// Role of the local node. +#[derive(Debug, Clone)] +pub enum Role { + /// Regular full node. + Full, + /// Regular light node. + Light, + /// Sentry node that guards an authority. Will be reported as "authority" on the wire protocol. + Sentry { + /// Address and identity of the validator nodes that we're guarding. + /// + /// The nodes will be granted some priviledged status. + validators: Vec, + }, + /// Actual authority. + Authority { + /// List of public addresses and identities of our sentry nodes. + sentry_nodes: Vec, } } -impl Roles { - /// Does this role represents a client that holds full chain data locally? - pub fn is_full(&self) -> bool { - self.intersects(Roles::FULL | Roles::AUTHORITY) - } - - /// Does this role represents a client that does not participates in the consensus? +impl Role { + /// True for `Role::Authority` pub fn is_authority(&self) -> bool { - *self == Roles::AUTHORITY + matches!(self, Role::Authority { .. }) } - /// Does this role represents a client that does not hold full chain data locally? - pub fn is_light(&self) -> bool { - !self.is_full() + /// True for `Role::Authority` and `Role::Sentry` since they're both + /// announced as having the authority role to the network. + pub fn is_network_authority(&self) -> bool { + matches!(self, Role::Authority { .. } | Role::Sentry { .. 
}) } } -impl fmt::Display for Roles { +impl fmt::Display for Role { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "{:?}", self) - } -} - -impl codec::Encode for Roles { - fn encode_to(&self, dest: &mut T) { - dest.push_byte(self.bits()) - } -} - -impl codec::EncodeLike for Roles {} - -impl codec::Decode for Roles { - fn decode(input: &mut I) -> Result { - Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) + match self { + Role::Full => write!(f, "FULL"), + Role::Light => write!(f, "LIGHT"), + Role::Sentry { .. } => write!(f, "SENTRY"), + Role::Authority { .. } => write!(f, "AUTHORITY"), + } } } @@ -214,6 +213,67 @@ pub fn parse_addr(mut addr: Multiaddr)-> Result<(PeerId, Multiaddr), ParseErr> { Ok((who, addr)) } +/// Address of a node, including its identity. +/// +/// This struct represents a decoded version of a multiaddress that ends with `/p2p/`. +/// +/// # Example +/// +/// ``` +/// # use sc_network::{Multiaddr, PeerId, config::MultiaddrWithPeerId}; +/// let addr: MultiaddrWithPeerId = +/// "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".parse().unwrap(); +/// assert_eq!(addr.peer_id.to_base58(), "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"); +/// assert_eq!(addr.multiaddr.to_string(), "/ip4/198.51.100.19/tcp/30333"); +/// ``` +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] +#[serde(try_from = "String", into = "String")] +pub struct MultiaddrWithPeerId { + /// Address of the node. + pub multiaddr: Multiaddr, + /// Its identity. + pub peer_id: PeerId, +} + +impl MultiaddrWithPeerId { + /// Concatenates the multiaddress and peer ID into one multiaddress containing both. + pub fn concat(&self) -> Multiaddr { + let proto = multiaddr::Protocol::P2p(From::from(self.peer_id.clone())); + self.multiaddr.clone().with(proto) + } +} + +impl fmt::Display for MultiaddrWithPeerId { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.concat(), f) + } +} + +impl FromStr for MultiaddrWithPeerId { + type Err = ParseErr; + + fn from_str(s: &str) -> Result { + let (peer_id, multiaddr) = parse_str_addr(s)?; + Ok(MultiaddrWithPeerId { + peer_id, + multiaddr, + }) + } +} + +impl From for String { + fn from(ma: MultiaddrWithPeerId) -> String { + format!("{}", ma) + } +} + +impl TryFrom for MultiaddrWithPeerId { + type Error = ParseErr; + fn try_from(string: String) -> Result { + string.parse() + } +} + /// Error that can be generated by `parse_str_addr`. #[derive(Debug)] pub enum ParseErr { @@ -254,28 +314,27 @@ impl From for ParseErr { /// Network service configuration. #[derive(Clone, Debug)] pub struct NetworkConfiguration { - /// Directory path to store general network configuration. None means nothing will be saved. - pub config_path: Option, /// Directory path to store network-specific configuration. None means nothing will be saved. - pub net_config_path: Option, + pub net_config_path: PathBuf, /// Multiaddresses to listen for incoming connections. pub listen_addresses: Vec, /// Multiaddresses to advertise. Detected automatically if empty. pub public_addresses: Vec, /// List of initial node addresses - pub boot_nodes: Vec, + pub boot_nodes: Vec, /// The node key configuration, which determines the node's network identity keypair. pub node_key: NodeKeyConfig, + /// List of notifications protocols that the node supports. Must also include a + /// `ConsensusEngineId` for backwards-compatibility. 
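MultiaddrWithPeerId is serialised and deserialised through its string form, which is what the try_from = "String" / into = "String" serde attributes arrange. A minimal standalone example of the same pattern with a hypothetical HostPort type that is not part of the crate:

use std::convert::TryFrom;

#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[serde(try_from = "String", into = "String")]
struct HostPort {
    host: String,
    port: u16,
}

impl From<HostPort> for String {
    fn from(hp: HostPort) -> String {
        format!("{}:{}", hp.host, hp.port)
    }
}

impl TryFrom<String> for HostPort {
    type Error = String;

    fn try_from(s: String) -> Result<Self, Self::Error> {
        let (host, port) = s.rsplit_once(':').ok_or_else(|| "missing ':'".to_string())?;
        Ok(HostPort {
            host: host.to_owned(),
            port: port.parse().map_err(|e| format!("invalid port: {}", e))?,
        })
    }
}

Serde requires Clone on the type when the into attribute is used, which is why the derive stays in place.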
+ pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, [u8]>)>, /// Maximum allowed number of incoming connections. pub in_peers: u32, /// Number of outgoing connections we're trying to maintain. pub out_peers: u32, /// List of reserved node addresses. - pub reserved_nodes: Vec, + pub reserved_nodes: Vec, /// The non-reserved peer mode. pub non_reserved_mode: NonReservedPeerMode, - /// List of sentry node public addresses. - pub sentry_nodes: Vec, /// Client identifier. Sent over the wire for debugging purposes. pub client_version: String, /// Name of the node. Sent over the wire for debugging purposes. @@ -286,22 +345,27 @@ pub struct NetworkConfiguration { pub max_parallel_downloads: u32, } -impl Default for NetworkConfiguration { - fn default() -> Self { +impl NetworkConfiguration { + /// Create new default configuration + pub fn new, SV: Into>( + node_name: SN, + client_version: SV, + node_key: NodeKeyConfig, + net_config_path: &PathBuf, + ) -> Self { NetworkConfiguration { - config_path: None, - net_config_path: None, + net_config_path: net_config_path.clone(), listen_addresses: Vec::new(), public_addresses: Vec::new(), boot_nodes: Vec::new(), - node_key: NodeKeyConfig::Ed25519(Secret::New), + node_key, + notifications_protocols: Vec::new(), in_peers: 25, out_peers: 75, reserved_nodes: Vec::new(), non_reserved_mode: NonReservedPeerMode::Accept, - sentry_nodes: Vec::new(), - client_version: "unknown".into(), - node_name: "unknown".into(), + client_version: client_version.into(), + node_name: node_name.into(), transport: TransportConfig::Normal { enable_mdns: false, allow_private_ipv4: true, @@ -314,30 +378,39 @@ impl Default for NetworkConfiguration { } impl NetworkConfiguration { - /// Create a new instance of default settings. - pub fn new() -> Self { - Self::default() - } - /// Create new default configuration for localhost-only connection with random port (useful for testing) pub fn new_local() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new(); + let mut config = NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + &std::env::current_dir().expect("current directory must exist"), + ); + config.listen_addresses = vec![ iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) .chain(iter::once(multiaddr::Protocol::Tcp(0))) .collect() ]; + config } /// Create new default configuration for localhost-only connection with random port (useful for testing) pub fn new_memory() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new(); + let mut config = NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + &std::env::current_dir().expect("current directory must exist"), + ); + config.listen_addresses = vec![ iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) .chain(iter::once(multiaddr::Protocol::Tcp(0))) .collect() ]; + config } } @@ -402,6 +475,12 @@ pub enum NodeKeyConfig { Ed25519(Secret) } +impl Default for NodeKeyConfig { + fn default() -> NodeKeyConfig { + NodeKeyConfig::Ed25519(Secret::New) + } +} + /// The options for obtaining a Ed25519 secret key. 
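NetworkConfiguration is no longer built via Default; callers pass the node name, client version, node key and config path explicitly, and the test helpers (new_local, new_memory) layer a localhost listen address with a random port on top. A rough standalone sketch of that constructor shape with simplified fields and no libp2p types (the node key is omitted here):

use std::path::PathBuf;

#[derive(Debug, Clone)]
struct NetConfig {
    node_name: String,
    client_version: String,
    net_config_path: PathBuf,
    listen_addresses: Vec<String>,
    in_peers: u32,
    out_peers: u32,
}

impl NetConfig {
    fn new(node_name: impl Into<String>, client_version: impl Into<String>, path: &PathBuf) -> Self {
        NetConfig {
            node_name: node_name.into(),
            client_version: client_version.into(),
            net_config_path: path.clone(),
            listen_addresses: Vec::new(),
            in_peers: 25,
            out_peers: 75,
        }
    }

    fn new_local() -> Self {
        let mut config = Self::new(
            "test-node",
            "test-client",
            &std::env::current_dir().expect("current directory must exist"),
        );
        // Bind only to localhost on a random port, mirroring the test helper above.
        config.listen_addresses = vec!["/ip4/127.0.0.1/tcp/0".to_string()];
        config
    }
}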
pub type Ed25519Secret = Secret; diff --git a/client/network/src/debug_info.rs b/client/network/src/debug_info.rs index 17fb622f7cd3cab44f3b17ab508d7f1a3b258882..e2803cde35a772e5e094919ef31b364af640c487 100644 --- a/client/network/src/debug_info.rs +++ b/client/network/src/debug_info.rs @@ -17,14 +17,15 @@ use fnv::FnvHashMap; use futures::prelude::*; use libp2p::Multiaddr; -use libp2p::core::nodes::listeners::ListenerId; +use libp2p::core::connection::{ConnectionId, ListenerId}; use libp2p::core::{ConnectedPoint, either::EitherOutput, PeerId, PublicKey}; use libp2p::swarm::{IntoProtocolsHandler, IntoProtocolsHandlerSelect, ProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use libp2p::identify::{Identify, IdentifyEvent, IdentifyInfo}; use libp2p::ping::{Ping, PingConfig, PingEvent, PingSuccess}; use log::{debug, trace, error}; -use std::error; +use smallvec::SmallVec; +use std::{error, io}; use std::collections::hash_map::Entry; use std::pin::Pin; use std::task::{Context, Poll}; @@ -56,14 +57,27 @@ struct NodeInfo { /// When we will remove the entry about this node from the list, or `None` if we're connected /// to the node. info_expire: Option, - /// How we're connected to the node. - endpoint: ConnectedPoint, + /// Non-empty list of connected endpoints, one per connection. + endpoints: SmallVec<[ConnectedPoint; crate::MAX_CONNECTIONS_PER_PEER]>, /// Version reported by the remote, or `None` if unknown. client_version: Option, /// Latest ping time with this node. latest_ping: Option, } +impl NodeInfo { + fn new(endpoint: ConnectedPoint) -> Self { + let mut endpoints = SmallVec::new(); + endpoints.push(endpoint); + NodeInfo { + info_expire: None, + endpoints, + client_version: None, + latest_ping: None, + } + } +} + impl DebugInfoBehaviour { /// Builds a new `DebugInfoBehaviour`. pub fn new( @@ -121,9 +135,9 @@ impl DebugInfoBehaviour { pub struct Node<'a>(&'a NodeInfo); impl<'a> Node<'a> { - /// Returns the endpoint we are connected to or were last connected to. + /// Returns the endpoint of an established connection to the peer. pub fn endpoint(&self) -> &'a ConnectedPoint { - &self.0.endpoint + &self.0.endpoints[0] // `endpoints` are non-empty by definition } /// Returns the latest version information we know of. 
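Because libp2p can now keep several connections to the same peer, the debug-info behaviour stores a non-empty list of endpoints (one per established connection) instead of a single endpoint, and prunes entries as individual connections close. A simplified standalone sketch of that bookkeeping with placeholder types:

use std::collections::HashMap;

// Placeholder types; the real code uses libp2p PeerId and ConnectedPoint.
type PeerId = u32;
type Endpoint = String;

#[derive(Default)]
struct NodeInfo {
    endpoints: Vec<Endpoint>,
}

#[derive(Default)]
struct DebugInfo {
    nodes: HashMap<PeerId, NodeInfo>,
}

impl DebugInfo {
    fn connection_established(&mut self, peer: PeerId, endpoint: Endpoint) {
        self.nodes.entry(peer).or_default().endpoints.push(endpoint);
    }

    fn connection_closed(&mut self, peer: PeerId, endpoint: &Endpoint) {
        if let Some(info) = self.nodes.get_mut(&peer) {
            info.endpoints.retain(|e| e != endpoint);
        }
    }

    // While the peer is connected the list is non-empty, so reporting the first
    // endpoint mirrors the Node::endpoint accessor above.
    fn endpoint(&self, peer: PeerId) -> Option<&Endpoint> {
        self.nodes.get(&peer).and_then(|info| info.endpoints.first())
    }
}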
@@ -168,18 +182,17 @@ impl NetworkBehaviour for DebugInfoBehaviour { list } - fn inject_connected(&mut self, peer_id: PeerId, endpoint: ConnectedPoint) { - self.ping.inject_connected(peer_id.clone(), endpoint.clone()); - self.identify.inject_connected(peer_id.clone(), endpoint.clone()); + fn inject_connected(&mut self, peer_id: &PeerId) { + self.ping.inject_connected(peer_id); + self.identify.inject_connected(peer_id); + } - match self.nodes_info.entry(peer_id) { + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.ping.inject_connection_established(peer_id, conn, endpoint); + self.identify.inject_connection_established(peer_id, conn, endpoint); + match self.nodes_info.entry(peer_id.clone()) { Entry::Vacant(e) => { - e.insert(NodeInfo { - info_expire: None, - endpoint, - client_version: None, - latest_ping: None, - }); + e.insert(NodeInfo::new(endpoint.clone())); } Entry::Occupied(e) => { let e = e.into_mut(); @@ -188,14 +201,26 @@ impl NetworkBehaviour for DebugInfoBehaviour { e.latest_ping = None; } e.info_expire = None; - e.endpoint = endpoint; + e.endpoints.push(endpoint.clone()); } } } - fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint) { - self.ping.inject_disconnected(peer_id, endpoint.clone()); - self.identify.inject_disconnected(peer_id, endpoint); + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.ping.inject_connection_closed(peer_id, conn, endpoint); + self.identify.inject_connection_closed(peer_id, conn, endpoint); + + if let Some(entry) = self.nodes_info.get_mut(peer_id) { + entry.endpoints.retain(|ep| ep != endpoint) + } else { + error!(target: "sub-libp2p", + "Unknown connection to {:?} closed: {:?}", peer_id, endpoint); + } + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.ping.inject_disconnected(peer_id); + self.identify.inject_disconnected(peer_id); if let Some(entry) = self.nodes_info.get_mut(peer_id) { entry.info_expire = Some(Instant::now() + CACHE_EXPIRE); @@ -205,26 +230,15 @@ impl NetworkBehaviour for DebugInfoBehaviour { } } - fn inject_node_event( + fn inject_event( &mut self, peer_id: PeerId, + connection: ConnectionId, event: <::Handler as ProtocolsHandler>::OutEvent ) { match event { - EitherOutput::First(event) => self.ping.inject_node_event(peer_id, event), - EitherOutput::Second(event) => self.identify.inject_node_event(peer_id, event), - } - } - - fn inject_replaced(&mut self, peer_id: PeerId, closed_endpoint: ConnectedPoint, new_endpoint: ConnectedPoint) { - self.ping.inject_replaced(peer_id.clone(), closed_endpoint.clone(), new_endpoint.clone()); - self.identify.inject_replaced(peer_id.clone(), closed_endpoint, new_endpoint.clone()); - - if let Some(entry) = self.nodes_info.get_mut(&peer_id) { - entry.endpoint = new_endpoint; - } else { - error!(target: "sub-libp2p", - "Disconnected from node we were not connected to {:?}", peer_id); + EitherOutput::First(event) => self.ping.inject_event(peer_id, connection, event), + EitherOutput::Second(event) => self.identify.inject_event(peer_id, connection, event), } } @@ -258,9 +272,9 @@ impl NetworkBehaviour for DebugInfoBehaviour { self.identify.inject_listener_error(id, err); } - fn inject_listener_closed(&mut self, id: ListenerId) { - self.ping.inject_listener_closed(id); - self.identify.inject_listener_closed(id); + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + 
self.ping.inject_listener_closed(id, reason); + self.identify.inject_listener_closed(id, reason); } fn poll( @@ -283,11 +297,12 @@ impl NetworkBehaviour for DebugInfoBehaviour { }, Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id }) => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id }), - Poll::Ready(NetworkBehaviourAction::SendEvent { peer_id, event }) => - return Poll::Ready(NetworkBehaviourAction::SendEvent { + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, + handler, event: EitherOutput::First(event) }), Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => @@ -312,11 +327,12 @@ impl NetworkBehaviour for DebugInfoBehaviour { }, Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id }) => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id }), - Poll::Ready(NetworkBehaviourAction::SendEvent { peer_id, event }) => - return Poll::Ready(NetworkBehaviourAction::SendEvent { + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, + handler, event: EitherOutput::Second(event) }), Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index a3a69f9db76b9b501d0d2c3116b4626951f477bb..91634b8abfe4fcef3d1dd6bf732c2b760e41448c 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -46,33 +46,22 @@ //! 
use crate::config::ProtocolId; -use futures::{future::BoxFuture, prelude::*}; +use futures::prelude::*; use futures_timer::Delay; -use libp2p::core::{nodes::listeners::ListenerId, ConnectedPoint, Multiaddr, PeerId, PublicKey}; -use libp2p::core::upgrade::{ProtocolName, UpgradeInfo, InboundUpgrade, OutboundUpgrade}; -use libp2p::swarm::{ - KeepAlive, - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - PollParameters, - ProtocolsHandler, - ProtocolsHandlerEvent, - ProtocolsHandlerUpgrErr, - SubstreamProtocol -}; +use libp2p::core::{connection::{ConnectionId, ListenerId}, ConnectedPoint, Multiaddr, PeerId, PublicKey}; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; use libp2p::kad::{Kademlia, KademliaConfig, KademliaEvent, Quorum, Record}; use libp2p::kad::GetClosestPeersError; use libp2p::kad::handler::KademliaHandler; use libp2p::kad::QueryId; use libp2p::kad::record::{self, store::MemoryStore}; #[cfg(not(target_os = "unknown"))] -use libp2p::{swarm::toggle::Toggle}; +use libp2p::swarm::{protocols_handler::multi::MultiHandler, toggle::Toggle}; #[cfg(not(target_os = "unknown"))] use libp2p::mdns::{Mdns, MdnsEvent}; use libp2p::multiaddr::Protocol; use log::{debug, info, trace, warn, error}; -use std::{cmp, collections::{HashMap, HashSet, VecDeque}, time::Duration}; +use std::{cmp, collections::{HashMap, HashSet, VecDeque}, io, time::Duration}; use std::task::{Context, Poll}; use sp_core::hexdisplay::HexDisplay; @@ -153,6 +142,8 @@ impl DiscoveryBehaviour { let mut config = KademliaConfig::default(); + // NB: If the protocol name derivation below is changed, check if + // `DiscoveryBehaviour::new_handler` is still correct. let proto_name = if libp2p::kad::protocol::DEFAULT_PROTO_NAME == p.as_bytes() { // Temporary hack to retain backwards compatibility. Once this version @@ -165,6 +156,7 @@ impl DiscoveryBehaviour { v.extend_from_slice(b"/kad"); v }; + config.set_protocol_name(proto_name); let store = MemoryStore::new(self.local_peer_id.clone()); @@ -193,6 +185,9 @@ impl DiscoveryBehaviour { /// If we didn't know this address before, also generates a `Discovered` event. 
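Each registered ProtocolId gets its own Kademlia instance, and its protocol name is derived from the id (roughly "/<protocol-id>/kad"), with a backwards-compatibility branch when the id already matches the libp2p default; the comment in new_handler warns against changing this derivation because the MultiHandler relies on the names being distinct per ProtocolId. A standalone sketch of the derivation as it appears here, with a local constant standing in for libp2p's DEFAULT_PROTO_NAME and the legacy branch simply keeping the name unchanged:

// Local stand-in for libp2p::kad::protocol::DEFAULT_PROTO_NAME.
const DEFAULT_PROTO_NAME: &[u8] = b"/ipfs/kad/1.0.0";

fn kad_protocol_name(protocol_id: &[u8]) -> Vec<u8> {
    if protocol_id == DEFAULT_PROTO_NAME {
        // Backwards-compatibility case retained by the real code.
        protocol_id.to_vec()
    } else {
        // Otherwise derive "/<protocol-id>/kad".
        let mut v = vec![b'/'];
        v.extend_from_slice(protocol_id);
        v.extend_from_slice(b"/kad");
        v
    }
}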
pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { if self.user_defined.iter().all(|(p, a)| *p != peer_id && *a != addr) { + for k in self.kademlias.values_mut() { + k.add_address(&peer_id, addr.clone()) + } self.discoveries.push_back(peer_id.clone()); self.user_defined.push((peer_id, addr)); } @@ -264,16 +259,19 @@ pub enum DiscoveryOut { } impl NetworkBehaviour for DiscoveryBehaviour { - type ProtocolsHandler = DiscoveryHandler; + type ProtocolsHandler = MultiHandler>; type OutEvent = DiscoveryOut; fn new_handler(&mut self) -> Self::ProtocolsHandler { - let mut v = Vec::new(); - for (p, k) in &mut self.kademlias { - let h = NetworkBehaviour::new_handler(k); - v.push((p.clone(), h)) - } - DiscoveryHandler { handlers: v } + let iter = self.kademlias.iter_mut() + .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); + + MultiHandler::try_from_iter(iter) + .expect("There can be at most one handler per `ProtocolId` and \ + protocol names contain the `ProtocolId` so no two protocol \ + names in `self.kademlias` can be equal which is the only error \ + `try_from_iter` can return, therefore this call is guaranteed \ + to succeed; qed") } fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { @@ -328,23 +326,29 @@ impl NetworkBehaviour for DiscoveryBehaviour { list } - fn inject_connected(&mut self, peer_id: PeerId, endpoint: ConnectedPoint) { + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { self.num_connections += 1; for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_connected(k, peer_id.clone(), endpoint.clone()) + NetworkBehaviour::inject_connection_established(k, peer_id, conn, endpoint) } } - fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint) { + fn inject_connected(&mut self, peer_id: &PeerId) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_connected(k, peer_id) + } + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { self.num_connections -= 1; for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_disconnected(k, peer_id, endpoint.clone()) + NetworkBehaviour::inject_connection_closed(k, peer_id, conn, endpoint) } } - fn inject_replaced(&mut self, peer_id: PeerId, closed: ConnectedPoint, opened: ConnectedPoint) { + fn inject_disconnected(&mut self, peer_id: &PeerId) { for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_replaced(k, peer_id.clone(), closed.clone(), opened.clone()) + NetworkBehaviour::inject_disconnected(k, peer_id) } } @@ -359,13 +363,14 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_node_event( + fn inject_event( &mut self, peer_id: PeerId, + connection: ConnectionId, (pid, event): ::OutEvent, ) { if let Some(kad) = self.kademlias.get_mut(&pid) { - return kad.inject_node_event(peer_id, event) + return kad.inject_event(peer_id, connection, event) } log::error!(target: "sub-libp2p", "inject_node_event: no kademlia instance registered for protocol {:?}", @@ -375,7 +380,7 @@ impl NetworkBehaviour for DiscoveryBehaviour { fn inject_new_external_addr(&mut self, addr: &Multiaddr) { let new_addr = addr.clone() .with(Protocol::P2p(self.local_peer_id.clone().into())); - info!(target: "sub-libp2p", "Discovered new external address for our node: {}", new_addr); + info!(target: "sub-libp2p", "🔍 Discovered new external address for our node: {}", new_addr); for k in self.kademlias.values_mut() { 
NetworkBehaviour::inject_new_external_addr(k, addr) } @@ -407,10 +412,10 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } - fn inject_listener_closed(&mut self, id: ListenerId) { + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { error!(target: "sub-libp2p", "Libp2p listener {:?} closed", id); for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_listener_closed(k, id) + NetworkBehaviour::inject_listener_closed(k, id, reason) } } @@ -544,14 +549,15 @@ impl NetworkBehaviour for DiscoveryBehaviour { e => { warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) } - }, + } NetworkBehaviourAction::DialAddress { address } => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - NetworkBehaviourAction::DialPeer { peer_id } => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id }), - NetworkBehaviourAction::SendEvent { peer_id, event } => - return Poll::Ready(NetworkBehaviourAction::SendEvent { + NetworkBehaviourAction::DialPeer { peer_id, condition } => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, + handler, event: (pid.clone(), event) }), NetworkBehaviourAction::ReportObservedAddr { address } => @@ -582,9 +588,9 @@ impl NetworkBehaviour for DiscoveryBehaviour { }, NetworkBehaviourAction::DialAddress { address } => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - NetworkBehaviourAction::DialPeer { peer_id } => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id }), - NetworkBehaviourAction::SendEvent { event, .. } => + NetworkBehaviourAction::DialPeer { peer_id, condition } => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + NetworkBehaviourAction::NotifyHandler { event, .. 
} => match event {}, // `event` is an enum with no variant NetworkBehaviourAction::ReportObservedAddr { address } => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), @@ -595,182 +601,6 @@ impl NetworkBehaviour for DiscoveryBehaviour { } } -pub struct DiscoveryHandler { - handlers: Vec<(ProtocolId, KademliaHandler)> -} - -impl ProtocolsHandler for DiscoveryHandler { - type InEvent = (ProtocolId, as ProtocolsHandler>::InEvent); - type OutEvent = (ProtocolId, as ProtocolsHandler>::OutEvent); - type Error = as ProtocolsHandler>::Error; - type InboundProtocol = KadUpgrade< as ProtocolsHandler>::InboundProtocol>; - type OutboundProtocol = as ProtocolsHandler>::OutboundProtocol; - type OutboundOpenInfo = (ProtocolId, as ProtocolsHandler>::OutboundOpenInfo); - - fn listen_protocol(&self) -> SubstreamProtocol { - let upgrades = self.handlers.iter() - .map(|h| (h.0.clone(), h.1.listen_protocol().into_upgrade().1)) - .collect(); - SubstreamProtocol::new(KadUpgrade { upgrades }) - } - - fn inject_fully_negotiated_outbound - ( &mut self - , protocol: >::Output - , (pid, rest): Self::OutboundOpenInfo - ) - { - if let Some((_, kad)) = self.handlers.iter_mut().find(|p| &p.0 == &pid) { - return kad.inject_fully_negotiated_outbound(protocol, rest) - } - log::error!(target: "sub-libp2p", - "inject_fully_negotiated_outbound: no kademlia instance registered for protocol {:?}", - pid) - } - - fn inject_fully_negotiated_inbound - ( &mut self - , (pid, rest): >::Output - ) - { - if let Some((_, kad)) = self.handlers.iter_mut().find(|p| &p.0 == &pid) { - return kad.inject_fully_negotiated_inbound(rest) - } - log::error!(target: "sub-libp2p", - "inject_fully_negotiated_inbound: no kademlia instance registered for protocol {:?}", - pid) - } - - fn inject_event(&mut self, (pid, event): Self::InEvent) { - if let Some((_, kad)) = self.handlers.iter_mut().find(|p| &p.0 == &pid) { - return kad.inject_event(event) - } - log::error!(target: "sub-libp2p", - "inject_event: no kademlia instance registered for protocol {:?}", - pid) - } - - fn inject_dial_upgrade_error - ( &mut self - , (pid, other): Self::OutboundOpenInfo - , error: ProtocolsHandlerUpgrErr - ) - { - if let Some((_, kad)) = self.handlers.iter_mut().find(|p| &p.0 == &pid) { - return kad.inject_dial_upgrade_error(other, error) - } - log::error!(target: "sub-libp2p", - "inject_dial_upgrade_error: no kademlia instance registered for protocol {:?}", - pid) - } - - fn connection_keep_alive(&self) -> KeepAlive { - self.handlers.iter() - .map(|x| x.1.connection_keep_alive()) - .max() - .unwrap_or(KeepAlive::No) - } - - fn poll - ( &mut self - , cx: &mut Context - ) -> Poll> - { - for (pid, kad) in self.handlers.iter_mut() { - if let Poll::Ready(event) = kad.poll(cx) { - let event = event - .map_outbound_open_info(|i| (pid.clone(), i)) - .map_custom(|p| (pid.clone(), p)); - return Poll::Ready(event) - } - } - - Poll::Pending - } -} - -#[derive(Debug, Clone)] -pub struct KadInfo(ProtocolId, T); - -impl ProtocolName for KadInfo { - fn protocol_name(&self) -> &[u8] { - self.1.protocol_name() - } -} - -#[derive(Debug, Clone)] -pub struct KadUpgrade { - upgrades: Vec<(ProtocolId, T)> -} - -impl UpgradeInfo for KadUpgrade { - type Info = KadInfo; - type InfoIter = Vec; - - fn protocol_info(&self) -> Self::InfoIter { - self.upgrades.iter().map(|(p, i)| std::iter::repeat(p.clone()).zip(i.protocol_info())) - .flatten() - .map(|(p, i)| KadInfo(p, i)) - .collect() - } -} - -impl InboundUpgrade for KadUpgrade -where - C: AsyncRead + AsyncWrite + Unpin, - 
T: InboundUpgrade, - T::Info: Send + 'static, - T::Future: Send + 'static -{ - type Output = (ProtocolId, A); - type Error = (ProtocolId, E); - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(mut self, resource: C, info: Self::Info) -> Self::Future { - let pid = info.0; - let upg = info.1; - let pos = self.upgrades.iter().position(|i| &i.0 == &pid) - .expect("upgrade_inbound is applied to a protocol id from \ - protocol_info, which only contains ids from the same set of \ - upgrades we are searching here, therefore looking for this id \ - is guaranteed to give us a non-empty result; qed"); - self.upgrades.remove(pos).1.upgrade_inbound(resource, upg) - .map(move |out| match out { - Ok(o) => Ok((pid, o)), - Err(e) => Err((pid, e)) - }) - .boxed() - } -} - -impl OutboundUpgrade for KadUpgrade -where - C: AsyncRead + AsyncWrite + Unpin, - T: OutboundUpgrade, - T::Info: Send + 'static, - T::Future: Send + 'static -{ - type Output = (ProtocolId, A); - type Error = (ProtocolId, E); - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(mut self, resource: C, info: Self::Info) -> Self::Future { - let pid = info.0; - let upg = info.1; - let pos = self.upgrades.iter().position(|i| &i.0 == &pid) - .expect("upgrade_outbound is applied to a protocol id from \ - protocol_info, which only contains ids from the same set of \ - upgrades we are searching here, therefore looking for this id \ - is guaranteed to give us a non-empty result; qed"); - self.upgrades.remove(pos).1.upgrade_outbound(resource, upg) - .map(move |out| match out { - Ok(o) => Ok((pid, o)), - Err(e) => Err((pid, e)) - }) - .boxed() - } -} - #[cfg(test)] mod tests { use crate::config::ProtocolId; diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index a5397a4e3e6263f2c6c33263b7002549cedcacae..d8afa1f1530ef8935df8a7177cd4d696d0c4adc2 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -90,23 +90,57 @@ //! ## Substreams //! //! Once a connection has been established and uses multiplexing, substreams can be opened. When -//! a substream is open, the **multistream-select** protocol is used to negotiate which protocol to -//! use on that given substream. In practice, Substrate opens the following substreams: -//! -//! - We periodically open an ephemeral substream in order to ping the remote and check whether the -//! connection is still alive. Failure for the remote to reply leads to a disconnection. This uses -//! the libp2p ping protocol. -//! - We periodically open an ephemeral substream in order to ask information from the remote. This -//! is called [the `identify` protocol](https://github.com/libp2p/specs/tree/master/identify). -//! - We periodically open ephemeral substreams for Kademlia random walk queries. Each Kademlia -//! query is done in a new separate substream. This uses the -//! [standard libp2p Kademlia protocol](https://github.com/libp2p/specs/pull/108). -//! - We optionally keep a substream alive for all Substrate-based communications. The name of the -//! protocol negotiated is based on the *protocol ID* passed as part of the network configuration. -//! This protocol ID should be unique for each chain and prevents nodes from different chains from -//! connecting to each other. More information below. -//! -//! ## The Substrate substream +//! a substream is open, the **multistream-select** protocol is used to negotiate which protocol +//! to use on that given substream. +//! +//! Protocols that are specific to a certain chain have a `` in their name. 
This +//! "protocol ID" is defined in the chain specifications. For example, the protocol ID of Polkadot +//! is "dot". In the protocol names below, `` must be replaced with the corresponding +//! protocol ID. +//! +//! > **Note**: It is possible for the same connection to be used for multiple chains. For example, +//! > one can use both the `/dot/sync/2` and `/sub/sync/2` protocols on the same +//! > connection, provided that the remote supports them. +//! +//! Substrate uses the following standard libp2p protocols: +//! +//! - **`/ipfs/ping/1.0.0`**. We periodically open an ephemeral substream in order to ping the +//! remote and check whether the connection is still alive. Failure for the remote to reply leads +//! to a disconnection. +//! - **[`/ipfs/id/1.0.0`](https://github.com/libp2p/specs/tree/master/identify)**. We +//! periodically open an ephemeral substream in order to ask information from the remote. +//! - **[`/ipfs/kad/1.0.0`](https://github.com/libp2p/specs/pull/108)**. We periodically open +//! ephemeral substreams for Kademlia random walk queries. Each Kademlia query is done in a +//! separate substream. +//! +//! Additionally, Substrate uses the following non-libp2p-standard protocols: +//! +//! - **`/substrate//`** (where `` must be replaced with the +//! protocol ID of the targeted chain, and `` is a number between 2 and 6). For each +//! connection we optionally keep an additional substream for all Substrate-based communications alive. +//! This protocol is considered legacy, and is progressively being replaced with alternatives. +//! This is designated as "The legacy Substrate substream" in this documentation. See below for +//! more details. +//! - **`//sync/2`** is a request-response protocol (see below) that lets one perform +//! requests for information about blocks. Each request is the encoding of a `BlockRequest` and +//! each response is the encoding of a `BlockResponse`, as defined in the `api.v1.proto` file in +//! this source tree. +//! - **`//light/2`** is a request-response protocol (see below) that lets one perform +//! light-client-related requests for information about the state. Each request is the encoding of +//! a `light::Request` and each response is the encoding of a `light::Response`, as defined in the +//! `light.v1.proto` file in this source tree. +//! - **`//transactions/1`** is a notifications protocol (see below) where +//! transactions are pushed to other nodes. The handshake is empty on both sides. The message +//! format is a SCALE-encoded list of transactions, where each transaction is an opaque list of +//! bytes. +//! - **`//block-announces/1`** is a notifications protocol (see below) where +//! block announces are pushed to other nodes. The handshake is empty on both sides. The message +//! format is a SCALE-encoded tuple containing a block header followed with an opaque list of +//! bytes containing some data associated with this block announcement, e.g. a candidate message. +//! - Notifications protocols that are registered using the `register_notifications_protocol` +//! method. For example: `/paritytech/grandpa/1`. See below for more information. +//! +//! ## The legacy Substrate substream //! //! Substrate uses a component named the **peerset manager (PSM)**. Through the discovery //! mechanism, the PSM is aware of the nodes that are part of the network and decides which nodes @@ -119,8 +153,8 @@ //! Note that at the moment there is no mechanism in place to solve the issues that arise where the //! 
two sides of a connection open the unique substream simultaneously. In order to not run into
 //! issues, only the dialer of a connection is allowed to open the unique substream. When the
-//! substream is closed, the entire connection is closed as well. This is a bug, and should be
-//! fixed by improving the protocol.
+//! substream is closed, the entire connection is closed as well. This is a bug that will be
+//! resolved by deprecating the protocol entirely.
 //!
 //! Within the unique Substrate substream, messages encoded using
 //! [*parity-scale-codec*](https://github.com/paritytech/parity-scale-codec) are exchanged.
@@ -137,9 +171,46 @@
 //! substream open with is chosen, and the information is requested from it.
 //! - Gossiping. Used for example by grandpa.
 //!
-//! It is intended that in the future each of these components gets more isolated, so that they
-//! are free to open and close their own substreams, and so that syncing and light client requests
-//! are able to communicate with nodes outside of the range of the PSM.
+//! ## Request-response protocols
+//!
+//! A so-called request-response protocol is defined as follows:
+//!
+//! - When a substream is opened, the opening side sends a message whose content is
+//! protocol-specific. The message must be prefixed with an
+//! [LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. After the
+//! message has been sent, the writing side is closed.
+//! - The remote sends back the response prefixed with a LEB128-encoded length, and closes its
+//! side as well.
+//!
+//! Each request is performed in a new separate substream.
+//!
+//! ## Notifications protocols
+//!
+//! A so-called notifications protocol is defined as follows:
+//!
+//! - When a substream is opened, the opening side sends a handshake message whose content is
+//! protocol-specific. The handshake message must be prefixed with an
+//! [LEB128-encoded number](https://en.wikipedia.org/wiki/LEB128) indicating its length. The
+//! handshake message can be of length 0, in which case the sender has to send a single `0`.
+//! - The receiver then either immediately closes the substream, or answers with its own
+//! LEB128-prefixed protocol-specific handshake response. The message can be of length 0, in which
+//! case a single `0` has to be sent back. The receiver is then encouraged to close its sending
+//! side.
+//! - Once the handshake has completed, the notifications protocol is unidirectional. Only the
+//! node which initiated the substream can push notifications. If the remote wants to send
+//! notifications as well, it has to open its own unidirectional substream.
+//! - Each notification must be prefixed with an LEB128-encoded length. The encoding of the
+//! messages is specific to each protocol.
+//!
+//! The API of `sc-network` allows one to register user-defined notification protocols.
+//! `sc-network` automatically tries to open a substream towards each node for which the legacy
+//! Substrate substream is open. The handshake is then performed automatically.
+//!
+//! For example, the `sc-finality-grandpa` crate registers the `/paritytech/grandpa/1`
+//! notifications protocol.
+//!
+//! At the moment, for backwards-compatibility, notification protocols are tied to the legacy
+//! Substrate substream. In the future, though, this will no longer be the case.
 //!
 //! # Usage
 //!
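Both the request-response and the notifications framing described above rely on the same LEB128 length prefix. The following is a hedged sketch of that encoding in plain Rust, not the codec `sc-network` actually uses.

// Unsigned LEB128: 7 data bits per byte, high bit set while more bytes follow.
fn leb128_encode(mut value: u64) -> Vec<u8> {
    let mut out = Vec::new();
    loop {
        let mut byte = (value & 0x7f) as u8;
        value >>= 7;
        if value != 0 {
            byte |= 0x80; // more bytes follow
        }
        out.push(byte);
        if value == 0 {
            break;
        }
    }
    out
}

// Length-prefix a payload as described above.
fn frame_message(payload: &[u8]) -> Vec<u8> {
    let mut framed = leb128_encode(payload.len() as u64);
    framed.extend_from_slice(payload);
    framed
}

Note that `frame_message(&[])` yields a single `0` byte, which matches the empty-handshake rule above.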
@@ -177,28 +248,19 @@ pub mod network_state; pub use service::{NetworkService, NetworkStateInfo, NetworkWorker, ExHashT, ReportHandle}; pub use protocol::PeerInfo; -pub use protocol::event::{Event, DhtEvent}; +pub use protocol::event::{Event, DhtEvent, ObservedRole}; pub use protocol::sync::SyncState; pub use libp2p::{Multiaddr, PeerId}; #[doc(inline)] pub use libp2p::multiaddr; -// Note: these re-exports shouldn't be part of the public API of the crate and will be removed in -// the future. -#[doc(hidden)] -pub use protocol::message; -#[doc(hidden)] -pub use protocol::message::Status as StatusMessage; - pub use sc_peerset::ReputationChange; -/// Extension trait for `NetworkBehaviour` that also accepts discovering nodes. -trait DiscoveryNetBehaviour { - /// Notify the protocol that we have learned about the existence of nodes. - /// - /// Can (or most likely will) be called multiple times with the same `PeerId`s. - /// - /// Also note that there is no notification for expired nodes. The implementer must add a TTL - /// system, or remove nodes that will fail to reach. - fn add_discovered_nodes(&mut self, nodes: impl Iterator); -} +/// The maximum allowed number of established connections per peer. +/// +/// Typically, and by design of the network behaviours in this crate, +/// there is a single established connection per peer. However, to +/// avoid unnecessary and nondeterministic connection closure in +/// case of (possibly repeated) simultaneous dialing attempts between +/// two peers, the per-peer connection limit is not set to 1 but 2. +const MAX_CONNECTIONS_PER_PEER: usize = 2; diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index d672ed0b7f569507b8b94c1ae4766ab200938f9f..d881bf6fe243a7d5607fa9d3e0afba8760b517a5 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -16,16 +16,18 @@ //! On-demand requests service. -use crate::protocol::light_dispatch::RequestData; -use std::{collections::HashMap, pin::Pin, sync::Arc, task::Context, task::Poll}; -use futures::{prelude::*, channel::mpsc, channel::oneshot}; +use crate::protocol::light_client_handler; + +use futures::{channel::oneshot, prelude::*}; use parking_lot::Mutex; -use sp_blockchain::Error as ClientError; use sc_client_api::{ - Fetcher, FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, - RemoteChangesRequest, RemoteReadChildRequest, RemoteBodyRequest, + FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, + RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, StorageProof, ChangesProof, }; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use std::{collections::HashMap, pin::Pin, sync::Arc, task::Context, task::Poll}; /// Implements the `Fetcher` trait of the client. Makes it possible for the light client to perform /// network requests for some state. @@ -41,18 +43,77 @@ pub struct OnDemand { /// Note that a better alternative would be to use a MPMC queue here, and add a `poll` method /// from the `OnDemand`. However there exists no popular implementation of MPMC channels in /// asynchronous Rust at the moment - requests_queue: Mutex>>>, + requests_queue: Mutex>>>, /// Sending side of `requests_queue`. 
- requests_send: mpsc::UnboundedSender>, + requests_send: TracingUnboundedSender>, } -impl OnDemand where +/// Dummy implementation of `FetchChecker` that always assumes that responses are bad. +/// +/// Considering that it is the responsibility of the client to build the fetcher, it can use this +/// implementation if it knows that it will never perform any request. +#[derive(Default, Clone)] +pub struct AlwaysBadChecker; + +impl FetchChecker for AlwaysBadChecker { + fn check_header_proof( + &self, + _request: &RemoteHeaderRequest, + _remote_header: Option, + _remote_proof: StorageProof, + ) -> Result { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_read_proof( + &self, + _request: &RemoteReadRequest, + _remote_proof: StorageProof, + ) -> Result,Option>>, ClientError> { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_read_child_proof( + &self, + _request: &RemoteReadChildRequest, + _remote_proof: StorageProof, + ) -> Result, Option>>, ClientError> { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_execution_proof( + &self, + _request: &RemoteCallRequest, + _remote_proof: StorageProof, + ) -> Result, ClientError> { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_changes_proof( + &self, + _request: &RemoteChangesRequest, + _remote_proof: ChangesProof + ) -> Result, u32)>, ClientError> { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_body_proof( + &self, + _request: &RemoteBodyRequest, + _body: Vec + ) -> Result, ClientError> { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } +} + +impl OnDemand +where B::Header: HeaderT, { /// Creates new on-demand service. pub fn new(checker: Arc>) -> Self { - let (requests_send, requests_queue) = mpsc::unbounded(); + let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand"); let requests_queue = Mutex::new(Some(requests_queue)); OnDemand { @@ -74,12 +135,15 @@ impl OnDemand where /// /// If this function returns `None`, that means that the receiver has already been extracted in /// the past, and therefore that something already handles the requests. 
- pub(crate) fn extract_receiver(&self) -> Option>> { + pub(crate) fn extract_receiver(&self) + -> Option>> + { self.requests_queue.lock().take() } } -impl Fetcher for OnDemand where +impl Fetcher for OnDemand +where B: BlockT, B::Header: HeaderT, { @@ -91,40 +155,55 @@ impl Fetcher for OnDemand where fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult { let (sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteHeader(request, sender)); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Header { request, sender }); RemoteResponse { receiver } } fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult { let (sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteRead(request, sender)); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Read { request, sender }); RemoteResponse { receiver } } fn remote_read_child( &self, - request: RemoteReadChildRequest + request: RemoteReadChildRequest, ) -> Self::RemoteReadResult { let (sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteReadChild(request, sender)); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::ReadChild { request, sender }); RemoteResponse { receiver } } fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult { let (sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteCall(request, sender)); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Call { request, sender }); RemoteResponse { receiver } } - fn remote_changes(&self, request: RemoteChangesRequest) -> Self::RemoteChangesResult { + fn remote_changes( + &self, + request: RemoteChangesRequest, + ) -> Self::RemoteChangesResult { let (sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteChanges(request, sender)); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Changes { request, sender }); RemoteResponse { receiver } } fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult { let (sender, receiver) = oneshot::channel(); - let _ = self.requests_send.unbounded_send(RequestData::RemoteBody(request, sender)); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Body { request, sender }); RemoteResponse { receiver } } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 2c0cb9df2e2001a5ff146e02a98356b2fa0e81a2..49479aa2d4fb001f4a591d83ddd8df69d0f234df 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -14,16 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
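Each `remote_*` method above follows the same pattern: allocate a oneshot channel, push the request together with the oneshot sender onto the unbounded queue consumed by the network task, and hand the oneshot receiver back to the caller. A compact sketch with `futures` 0.3 channels and stand-in types (not the crate's own API):

use futures::channel::{mpsc, oneshot};

// Stand-in request type; the real enum carries the typed light-client requests.
enum Request {
    Header { block_number: u64, sender: oneshot::Sender<Vec<u8>> },
}

struct OnDemandSketch {
    requests_send: mpsc::UnboundedSender<Request>,
}

impl OnDemandSketch {
    fn new() -> (Self, mpsc::UnboundedReceiver<Request>) {
        let (requests_send, requests_queue) = mpsc::unbounded();
        (OnDemandSketch { requests_send }, requests_queue)
    }

    fn remote_header(&self, block_number: u64) -> oneshot::Receiver<Vec<u8>> {
        let (sender, receiver) = oneshot::channel();
        // As in the patch, a send error (the network task is gone) is ignored.
        let _ = self.requests_send.unbounded_send(Request::Header { block_number, sender });
        receiver
    }
}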
-use crate::{DiscoveryNetBehaviour, config::ProtocolId}; +use crate::config::ProtocolId; use crate::utils::interval; use bytes::{Bytes, BytesMut}; use futures::prelude::*; use generic_proto::{GenericProto, GenericProtoOut}; use libp2p::{Multiaddr, PeerId}; -use libp2p::core::{ConnectedPoint, nodes::listeners::ListenerId}; +use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::storage::{StorageKey, ChildInfo}; +use sp_core::{ + storage::{StorageKey, ChildInfo}, + hexdisplay::HexDisplay +}; use sp_consensus::{ BlockOrigin, block_validation::BlockAnnounceValidator, @@ -35,22 +38,20 @@ use sp_runtime::traits::{ Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub }; use sp_arithmetic::traits::SaturatedConversion; -use message::{BlockAnnounce, BlockAttributes, Direction, FromBlock, Message, RequestId}; -use message::generic::Message as GenericMessage; -use light_dispatch::{LightDispatch, LightDispatchNetwork, RequestData}; -use prometheus_endpoint::{Registry, Gauge, register, PrometheusError, U64}; +use message::{BlockAnnounce, Message}; +use message::generic::{Message as GenericMessage, ConsensusMessage, Roles}; +use prometheus_endpoint::{Registry, Gauge, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; use sync::{ChainSync, SyncState}; use crate::service::{TransactionPool, ExHashT}; -use crate::config::{BoxFinalityProofRequestBuilder, Roles}; -use rustc_hex::ToHex; +use crate::config::BoxFinalityProofRequestBuilder; use std::borrow::Cow; -use std::collections::{BTreeMap, HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; use std::sync::Arc; use std::fmt::Write; -use std::{cmp, num::NonZeroUsize, pin::Pin, task::Poll, time}; +use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; use log::{log, Level, trace, debug, warn, error}; use crate::chain::{Client, FinalityProofProvider}; -use sc_client_api::{FetchChecker, ChangesProof, StorageProof}; +use sc_client_api::{ChangesProof, StorageProof}; use crate::error; use util::LruHashSet; use wasm_timer::Instant; @@ -72,11 +73,11 @@ pub mod block_requests; pub mod message; pub mod event; pub mod light_client_handler; -pub mod light_dispatch; pub mod sync; pub use block_requests::BlockRequests; pub use light_client_handler::LightClientHandler; +pub use generic_proto::LegacyConnectionKillError; const REQUEST_TIMEOUT_SEC: u64 = 40; /// Interval at which we perform time based maintenance @@ -142,92 +143,50 @@ struct Metrics { peers: Gauge, queued_blocks: Gauge, fork_targets: Gauge, - finality_proofs_pending: Gauge, - finality_proofs_active: Gauge, - finality_proofs_failed: Gauge, - finality_proofs_importing: Gauge, - justifications_pending: Gauge, - justifications_active: Gauge, - justifications_failed: Gauge, - justifications_importing: Gauge + finality_proofs: GaugeVec, + justifications: GaugeVec, } impl Metrics { fn register(r: &Registry) -> Result { Ok(Metrics { handshaking_peers: { - let g = Gauge::new("sync_handshaking_peers", "number of newly connected peers")?; + let g = Gauge::new("sync_handshaking_peers", "Number of newly connected peers")?; register(g, r)? }, obsolete_requests: { - let g = Gauge::new("sync_obsolete_requests", "total number of obsolete requests")?; + let g = Gauge::new("sync_obsolete_requests", "Number of obsolete requests")?; register(g, r)? 
}, peers: { - let g = Gauge::new("sync_peers", "number of peers we sync with")?; + let g = Gauge::new("sync_peers", "Number of peers we sync with")?; register(g, r)? }, queued_blocks: { - let g = Gauge::new("sync_queued_blocks", "number of blocks in import queue")?; + let g = Gauge::new("sync_queued_blocks", "Number of blocks in import queue")?; register(g, r)? }, fork_targets: { - let g = Gauge::new("sync_fork_targets", "fork sync targets")?; - register(g, r)? - }, - justifications_pending: { - let g = Gauge::new( - "sync_extra_justifications_pending", - "number of pending extra justifications requests" - )?; - register(g, r)? - }, - justifications_active: { - let g = Gauge::new( - "sync_extra_justifications_active", - "number of active extra justifications requests" - )?; - register(g, r)? - }, - justifications_failed: { - let g = Gauge::new( - "sync_extra_justifications_failed", - "number of failed extra justifications requests" - )?; - register(g, r)? - }, - justifications_importing: { - let g = Gauge::new( - "sync_extra_justifications_importing", - "number of importing extra justifications requests" - )?; - register(g, r)? - }, - finality_proofs_pending: { - let g = Gauge::new( - "sync_extra_finality_proofs_pending", - "number of pending extra finality proof requests" - )?; - register(g, r)? - }, - finality_proofs_active: { - let g = Gauge::new( - "sync_extra_finality_proofs_active", - "number of active extra finality proof requests" - )?; + let g = Gauge::new("sync_fork_targets", "Number of fork sync targets")?; register(g, r)? }, - finality_proofs_failed: { - let g = Gauge::new( - "sync_extra_finality_proofs_failed", - "number of failed extra finality proof requests" + justifications: { + let g = GaugeVec::new( + Opts::new( + "sync_extra_justifications", + "Number of extra justifications requests" + ), + &["status"], )?; register(g, r)? }, - finality_proofs_importing: { - let g = Gauge::new( - "sync_extra_finality_proofs_importing", - "number of importing extra finality proof requests" + finality_proofs: { + let g = GaugeVec::new( + Opts::new( + "sync_extra_finality_proofs", + "Number of extra finality proof requests", + ), + &["status"], )?; register(g, r)? }, @@ -241,9 +200,9 @@ pub struct Protocol { tick_timeout: Pin + Send>>, /// Interval at which we call `propagate_extrinsics`. propagate_timeout: Pin + Send>>, + /// Pending list of messages to return from `poll` as a priority. + pending_messages: VecDeque>, config: ProtocolConfig, - /// Handler for light client requests. - light_dispatch: LightDispatch, genesis_hash: B::Hash, sync: ChainSync, context_data: ContextData, @@ -261,10 +220,16 @@ pub struct Protocol { behaviour: GenericProto, /// For each legacy gossiping engine ID, the corresponding new protocol name. protocol_name_by_engine: HashMap>, - /// For each protocol name, the legacy gossiping engine ID. - protocol_engine_by_name: HashMap, ConsensusEngineId>, + /// For each protocol name, the legacy equivalent. + legacy_equiv_by_name: HashMap, Fallback>, + /// Name of the protocol used for transactions. + transactions_protocol: Cow<'static, [u8]>, + /// Name of the protocol used for block announces. + block_announces_protocol: Cow<'static, [u8]>, /// Prometheus metrics. metrics: Option, + /// The `PeerId`'s of all boot nodes. 
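The metrics change above collapses four related gauges into a single gauge vector keyed by a `status` label. A hedged sketch with the plain `prometheus` crate (the patch itself goes through the `prometheus_endpoint` wrapper and `U64` gauges):

use prometheus::{GaugeVec, Opts, Registry};

// One labelled gauge replaces the former
// sync_extra_justifications_{pending,active,failed,importing} family.
fn register_justifications_gauge(registry: &Registry) -> prometheus::Result<GaugeVec> {
    let gauge = GaugeVec::new(
        Opts::new("sync_extra_justifications", "Number of extra justifications requests"),
        &["status"],
    )?;
    registry.register(Box::new(gauge.clone()))?;
    // Updating one status now only needs the label value.
    gauge.with_label_values(&["pending"]).set(0.0);
    Ok(gauge)
}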
+ boot_node_ids: Arc>, } #[derive(Default)] @@ -310,132 +275,6 @@ pub struct PeerInfo { pub best_number: ::Number, } -struct LightDispatchIn<'a> { - behaviour: &'a mut GenericProto, - peerset: sc_peerset::PeersetHandle, -} - -impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { - fn report_peer(&mut self, who: &PeerId, reputation: sc_peerset::ReputationChange) { - self.peerset.report_peer(who.clone(), reputation) - } - - fn disconnect_peer(&mut self, who: &PeerId) { - self.behaviour.disconnect_peer(who) - } - - fn send_header_request(&mut self, who: &PeerId, id: RequestId, block: <::Header as HeaderT>::Number) { - let message: Message = message::generic::Message::RemoteHeaderRequest(message::RemoteHeaderRequest { - id, - block, - }); - - self.behaviour.send_packet(who, message.encode()) - } - - fn send_read_request( - &mut self, - who: &PeerId, - id: RequestId, - block: ::Hash, - keys: Vec>, - ) { - let message: Message = message::generic::Message::RemoteReadRequest(message::RemoteReadRequest { - id, - block, - keys, - }); - - self.behaviour.send_packet(who, message.encode()) - } - - fn send_read_child_request( - &mut self, - who: &PeerId, - id: RequestId, - block: ::Hash, - storage_key: Vec, - child_info: Vec, - child_type: u32, - keys: Vec>, - ) { - let message: Message = message::generic::Message::RemoteReadChildRequest(message::RemoteReadChildRequest { - id, - block, - storage_key, - child_info, - child_type, - keys, - }); - - self.behaviour.send_packet(who, message.encode()) - } - - fn send_call_request( - &mut self, - who: &PeerId, - id: RequestId, - block: ::Hash, - method: String, - data: Vec - ) { - let message: Message = message::generic::Message::RemoteCallRequest(message::RemoteCallRequest { - id, - block, - method, - data, - }); - - self.behaviour.send_packet(who, message.encode()) - } - - fn send_changes_request( - &mut self, - who: &PeerId, - id: RequestId, - first: ::Hash, - last: ::Hash, - min: ::Hash, - max: ::Hash, - storage_key: Option>, - key: Vec, - ) { - let message: Message = message::generic::Message::RemoteChangesRequest(message::RemoteChangesRequest { - id, - first, - last, - min, - max, - storage_key, - key, - }); - - self.behaviour.send_packet(who, message.encode()) - } - - fn send_body_request( - &mut self, - who: &PeerId, - id: RequestId, - fields: BlockAttributes, - from: FromBlock<::Hash, <::Header as HeaderT>::Number>, - to: Option<::Hash>, - direction: Direction, - max: Option - ) { - let message: Message = message::generic::Message::BlockRequest(message::BlockRequest:: { - id, - fields, - from, - to, - direction, - max, - }); - - self.behaviour.send_packet(who, message.encode()) - } -} - /// Data necessary to create a context. struct ContextData { // All connected peers @@ -462,19 +301,31 @@ impl Default for ProtocolConfig { } } +/// Fallback mechanism to use to send a notification if no substream is open. +#[derive(Debug, Clone, PartialEq, Eq)] +enum Fallback { + /// Use a `Message::Consensus` with the given engine ID. + Consensus(ConsensusEngineId), + /// The message is the bytes encoding of a `Transactions` (which is itself defined as a `Vec`). + Transactions, + /// The message is the bytes encoding of a `BlockAnnounce`. + BlockAnnounce, +} + impl Protocol { /// Create a new instance. 
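The `Fallback` mapping above decides how a payload received on one of the new notification protocols is related back to a legacy message. A rough sketch with stand-in types (the real code decodes SCALE-encoded messages):

type EngineId = [u8; 4]; // stand-in for ConsensusEngineId

enum Fallback {
    Consensus(EngineId),
    Transactions,
    BlockAnnounce,
}

fn interpret(fallback: &Fallback, payload: &[u8]) -> String {
    match fallback {
        Fallback::Consensus(engine_id) =>
            format!("forward {} bytes to consensus engine {:?}", payload.len(), engine_id),
        Fallback::Transactions =>
            format!("decode {} bytes as a transactions list", payload.len()),
        Fallback::BlockAnnounce =>
            format!("decode {} bytes as a block announce", payload.len()),
    }
}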
pub fn new( config: ProtocolConfig, chain: Arc>, - checker: Arc>, transaction_pool: Arc>, finality_proof_provider: Option>>, finality_proof_request_builder: Option>, protocol_id: ProtocolId, peerset_config: sc_peerset::PeersetConfig, block_announce_validator: Box + Send>, - metrics_registry: Option<&Registry> + metrics_registry: Option<&Registry>, + boot_node_ids: Arc>, + queue_size_report: Option, ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { let info = chain.info(); let sync = ChainSync::new( @@ -488,7 +339,7 @@ impl Protocol { let important_peers = { let mut imp_p = HashSet::new(); - for reserved in &peerset_config.reserved_nodes { + for reserved in peerset_config.priority_groups.iter().flat_map(|(_, l)| l.iter()) { imp_p.insert(reserved.clone()); } imp_p.shrink_to_fit(); @@ -497,18 +348,38 @@ impl Protocol { let (peerset, peerset_handle) = sc_peerset::Peerset::from_config(peerset_config); let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); - let behaviour = GenericProto::new(protocol_id, versions, peerset); + let mut behaviour = GenericProto::new(protocol_id.clone(), versions, peerset, queue_size_report); + + let mut legacy_equiv_by_name = HashMap::new(); + + let transactions_protocol: Cow<'static, [u8]> = Cow::from({ + let mut proto = b"/".to_vec(); + proto.extend(protocol_id.as_bytes()); + proto.extend(b"/transactions/1"); + proto + }); + behaviour.register_notif_protocol(transactions_protocol.clone(), Vec::new()); + legacy_equiv_by_name.insert(transactions_protocol.clone(), Fallback::Transactions); + + let block_announces_protocol: Cow<'static, [u8]> = Cow::from({ + let mut proto = b"/".to_vec(); + proto.extend(protocol_id.as_bytes()); + proto.extend(b"/block-announces/1"); + proto + }); + behaviour.register_notif_protocol(block_announces_protocol.clone(), Vec::new()); + legacy_equiv_by_name.insert(block_announces_protocol.clone(), Fallback::BlockAnnounce); let protocol = Protocol { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), propagate_timeout: Box::pin(interval(PROPAGATE_TIMEOUT)), + pending_messages: VecDeque::new(), config, context_data: ContextData { peers: HashMap::new(), stats: HashMap::new(), chain, }, - light_dispatch: LightDispatch::new(checker), genesis_hash: info.genesis_hash, sync, handshaking_peers: HashMap::new(), @@ -518,12 +389,15 @@ impl Protocol { peerset_handle: peerset_handle.clone(), behaviour, protocol_name_by_engine: HashMap::new(), - protocol_engine_by_name: HashMap::new(), + legacy_equiv_by_name, + transactions_protocol, + block_announces_protocol, metrics: if let Some(r) = metrics_registry { Some(Metrics::register(r)?) } else { None - } + }, + boot_node_ids, }; Ok((protocol, peerset_handle)) @@ -598,25 +472,16 @@ impl Protocol { self.sync.status().queued_blocks } + /// Number of processed blocks. + pub fn num_processed_blocks(&self) -> usize { + self.sync.num_processed_blocks() + } + /// Number of active sync requests. pub fn num_sync_requests(&self) -> usize { self.sync.num_sync_requests() } - /// Starts a new data demand request. - /// - /// The parameter contains a `Sender` where the result, once received, must be sent. 
- pub(crate) fn add_light_client_request(&mut self, rq: RequestData) { - self.light_dispatch.add_request(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }, rq); - } - - fn is_light_response(&self, who: &PeerId, response_id: message::RequestId) -> bool { - self.light_dispatch.is_light_response(&who, response_id) - } - fn handle_response( &mut self, who: PeerId, @@ -676,15 +541,10 @@ impl Protocol { GenericMessage::Status(s) => return self.on_status_message(who, s), GenericMessage::BlockRequest(r) => self.on_block_request(who, r), GenericMessage::BlockResponse(r) => { - // Note, this is safe because only `ordinary bodies` and `remote bodies` are received in this matter. - if self.is_light_response(&who, r.id) { - self.on_remote_body_response(who, r); - } else { - if let Some(request) = self.handle_response(who.clone(), &r) { - let outcome = self.on_block_response(who.clone(), request, r); - self.update_peer_info(&who); - return outcome - } + if let Some(request) = self.handle_response(who.clone(), &r) { + let outcome = self.on_block_response(who.clone(), request, r); + self.update_peer_info(&who); + return outcome } }, GenericMessage::BlockAnnounce(announce) => { @@ -695,20 +555,20 @@ impl Protocol { GenericMessage::Transactions(m) => self.on_extrinsics(who, m), GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(who, request), - GenericMessage::RemoteCallResponse(response) => - self.on_remote_call_response(who, response), + GenericMessage::RemoteCallResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected RemoteCallResponse"), GenericMessage::RemoteReadRequest(request) => self.on_remote_read_request(who, request), - GenericMessage::RemoteReadResponse(response) => - self.on_remote_read_response(who, response), + GenericMessage::RemoteReadResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected RemoteReadResponse"), GenericMessage::RemoteHeaderRequest(request) => self.on_remote_header_request(who, request), - GenericMessage::RemoteHeaderResponse(response) => - self.on_remote_header_response(who, response), + GenericMessage::RemoteHeaderResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected RemoteHeaderResponse"), GenericMessage::RemoteChangesRequest(request) => self.on_remote_changes_request(who, request), - GenericMessage::RemoteChangesResponse(response) => - self.on_remote_changes_response(who, response), + GenericMessage::RemoteChangesResponse(_) => + warn!(target: "sub-libp2p", "Received unexpected RemoteChangesResponse"), GenericMessage::FinalityProofRequest(request) => self.on_finality_proof_request(who, request), GenericMessage::FinalityProofResponse(response) => @@ -762,12 +622,18 @@ impl Protocol { ); } - fn send_message(&mut self, who: &PeerId, message: Message) { + fn send_message( + &mut self, + who: &PeerId, + message: Option<(Cow<'static, [u8]>, Vec)>, + legacy: Message, + ) { send_message::( &mut self.behaviour, &mut self.context_data.stats, who, message, + legacy, ); } @@ -793,10 +659,6 @@ impl Protocol { }; if let Some(_peer_data) = removed { self.sync.peer_disconnected(peer.clone()); - self.light_dispatch.on_disconnect(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }, &peer); // Notify all the notification protocols as closed. 
CustomMessageOutcome::NotificationStreamClosed { @@ -824,11 +686,7 @@ impl Protocol { } } - fn on_block_request( - &mut self, - peer: PeerId, - request: message::BlockRequest - ) { + fn on_block_request(&mut self, peer: PeerId, request: message::BlockRequest) { trace!(target: "sync", "BlockRequest {} from {}: from {:?} to {:?} max {:?} for {:?}", request.id, peer, @@ -905,7 +763,7 @@ impl Protocol { blocks: blocks, }; trace!(target: "sync", "Sending BlockResponse with {} blocks", response.blocks.len()); - self.send_message(&peer, GenericMessage::BlockResponse(response)) + self.send_message(&peer, None, GenericMessage::BlockResponse(response)) } /// Adjusts the reputation of a node. @@ -981,10 +839,6 @@ impl Protocol { /// > **Note**: This method normally doesn't have to be called except for testing purposes. pub fn tick(&mut self) { self.maintain_peers(); - self.light_dispatch.maintain_peers(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }); self.report_metrics() } @@ -1049,6 +903,17 @@ impl Protocol { ); self.peerset_handle.report_peer(who.clone(), rep::GENESIS_MISMATCH); self.behaviour.disconnect_peer(&who); + + if self.boot_node_ids.contains(&who) { + error!( + target: "sync", + "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", + who, + self.genesis_hash, + status.genesis_hash, + ); + } + return CustomMessageOutcome::None; } if status.version < MIN_VERSION && CURRENT_VERSION < status.min_supported_version { @@ -1121,10 +986,7 @@ impl Protocol { }; let info = self.context_data.peers.get(&who).expect("We just inserted above; QED").info.clone(); - self.light_dispatch.on_connect(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }, who.clone(), status.roles, status.best_number); + self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who.clone(), status.best_number)); if info.roles.is_full() { match self.sync.new_peer(who.clone(), info.best_hash, info.best_number) { Ok(None) => (), @@ -1152,10 +1014,15 @@ impl Protocol { &mut self, target: PeerId, engine_id: ConsensusEngineId, - message: impl Into> + message: impl Into>, ) { if let Some(protocol_name) = self.protocol_name_by_engine.get(&engine_id) { - self.behaviour.write_notification(&target, engine_id, protocol_name.clone(), message); + let message = message.into(); + let fallback = GenericMessage::<(), (), (), ()>::Consensus(ConsensusMessage { + engine_id, + data: message.clone(), + }).encode(); + self.behaviour.write_notification(&target, protocol_name.clone(), message, fallback); } else { error!( target: "sub-libp2p", @@ -1167,31 +1034,24 @@ impl Protocol { /// Registers a new notifications protocol. /// - /// You are very strongly encouraged to call this method very early on. Any connection open - /// will retain the protocols that were registered then, and not any new one. - pub fn register_notifications_protocol( - &mut self, + /// While registering a protocol while we already have open connections is discouraged, we + /// nonetheless handle it by notifying that we opened channels with everyone. This function + /// returns a list of substreams to open as a result. 
+ pub fn register_notifications_protocol<'a>( + &'a mut self, engine_id: ConsensusEngineId, protocol_name: impl Into>, - ) -> Vec { + ) -> impl ExactSizeIterator + 'a { let protocol_name = protocol_name.into(); if self.protocol_name_by_engine.insert(engine_id, protocol_name.clone()).is_some() { error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol_name); } else { - self.behaviour.register_notif_protocol(protocol_name.clone(), engine_id, Vec::new()); - self.protocol_engine_by_name.insert(protocol_name, engine_id); + self.behaviour.register_notif_protocol(protocol_name.clone(), Vec::new()); + self.legacy_equiv_by_name.insert(protocol_name, Fallback::Consensus(engine_id)); } - // Registering a protocol while we already have open connections isn't great, but for now - // we handle it by notifying that we opened channels with everyone. self.context_data.peers.iter() - .map(|(peer_id, peer)| - event::Event::NotificationStreamOpened { - remote: peer_id.clone(), - engine_id, - roles: peer.info.roles, - }) - .collect() + .map(|(peer_id, peer)| (peer_id, peer.info.roles)) } /// Called when peer sends us new extrinsics @@ -1249,7 +1109,7 @@ impl Protocol { fn do_propagate_extrinsics( &mut self, extrinsics: &[(H, B::Extrinsic)], - ) -> HashMap> { + ) -> HashMap> { let mut propagated_to = HashMap::new(); for (who, peer) in self.context_data.peers.iter_mut() { // never send extrinsics to the light node @@ -1271,10 +1131,12 @@ impl Protocol { .push(who.to_base58()); } trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); + let encoded = to_send.encode(); send_message:: ( &mut self.behaviour, &mut self.context_data.stats, &who, + Some((self.transactions_protocol.clone(), encoded)), GenericMessage::Transactions(to_send) ) } @@ -1329,7 +1191,7 @@ impl Protocol { trace!(target: "sync", "Announcing block {:?} to {}", hash, who); let inserted = peer.known_blocks.insert(hash); if inserted || force { - let message: Message = GenericMessage::BlockAnnounce(message::BlockAnnounce { + let message = message::BlockAnnounce { header: header.clone(), state: if peer.info.protocol_version >= 4 { if is_best { @@ -1345,13 +1207,16 @@ impl Protocol { } else { None }, - }); + }; + + let encoded = message.encode(); send_message:: ( &mut self.behaviour, &mut self.context_data.stats, &who, - message, + Some((self.block_announces_protocol.clone(), encoded)), + Message::::BlockAnnounce(message), ) } } @@ -1370,18 +1235,20 @@ impl Protocol { chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible }; - self.send_message(&who, GenericMessage::Status(status)) + self.send_message(&who, None, GenericMessage::Status(status)) } - fn on_block_announce(&mut self, who: PeerId, announce: BlockAnnounce) -> CustomMessageOutcome { + fn on_block_announce( + &mut self, + who: PeerId, + announce: BlockAnnounce, + ) -> CustomMessageOutcome { let hash = announce.header.hash(); + let number = *announce.header.number(); + if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { peer.known_blocks.insert(hash.clone()); } - self.light_dispatch.update_best_number(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }, who.clone(), *announce.header.number()); let is_their_best = match announce.state.unwrap_or(message::BlockState::Best) { message::BlockState::Best => true, @@ -1396,7 +1263,11 @@ impl Protocol { // 1) we're on light client; // AND // 2) parent block is already imported and not pruned. 
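The signature change to `register_notifications_protocol` above returns an iterator over the already-connected peers instead of eagerly collecting events, leaving it to the caller to turn each peer into a stream-opened notification. A stand-alone sketch of the idea (stand-in types, not the `Protocol` API):

use std::collections::HashMap;

struct PeersSketch {
    peers: HashMap<String, u8>, // peer id -> roles, standing in for PeerId/Roles
}

impl PeersSketch {
    // The caller drains the iterator and emits one "stream opened" event per peer.
    fn register_protocol<'a>(
        &'a mut self,
        _protocol_name: &[u8],
    ) -> impl ExactSizeIterator<Item = (&'a String, u8)> + 'a {
        self.peers.iter().map(|(peer_id, roles)| (peer_id, *roles))
    }
}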
- return CustomMessageOutcome::None + if is_their_best { + return CustomMessageOutcome::PeerNewBest(who, number); + } else { + return CustomMessageOutcome::None; + } } sync::OnBlockAnnounce::ImportHeader => () // We proceed with the import. } @@ -1421,33 +1292,37 @@ impl Protocol { }, ); match blocks_to_import { - Ok(sync::OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), + Ok(sync::OnBlockData::Import(origin, blocks)) => { + if is_their_best { + self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number)); + } + CustomMessageOutcome::BlockImport(origin, blocks) + }, Ok(sync::OnBlockData::Request(peer, req)) => { self.send_request(&peer, GenericMessage::BlockRequest(req)); - CustomMessageOutcome::None + if is_their_best { + CustomMessageOutcome::PeerNewBest(who, number) + } else { + CustomMessageOutcome::None + } } Err(sync::BadPeer(id, repu)) => { self.behaviour.disconnect_peer(&id); self.peerset_handle.report_peer(id, repu); - CustomMessageOutcome::None + if is_their_best { + CustomMessageOutcome::PeerNewBest(who, number) + } else { + CustomMessageOutcome::None + } } } } - /// Call this when a block has been imported in the import queue and we should announce it on - /// the network. - pub fn on_block_imported(&mut self, header: &B::Header, data: Vec, is_best: bool) { + /// Call this when a block has been imported in the import queue + pub fn on_block_imported(&mut self, header: &B::Header, is_best: bool) { if is_best { self.sync.update_chain_info(header); } - - // blocks are not announced by light clients - if self.config.roles.is_light() { - return; - } - - // send out block announcements - self.send_announcement(header, data, is_best, false); } /// Call this when a block has been finalized. The sync layer may have some additional @@ -1488,6 +1363,7 @@ impl Protocol { self.send_message( &who, + None, GenericMessage::RemoteCallResponse(message::RemoteCallResponse { id: request.id, proof, @@ -1557,6 +1433,13 @@ impl Protocol { self.sync.request_finality_proof(&hash, number) } + /// Notify the protocol that we have learned about the existence of nodes. + /// + /// Can be called multiple times with the same `PeerId`s. 
+ pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { + self.behaviour.add_discovered_nodes(peer_ids) + } + pub fn finality_proof_import_result( &mut self, request_block: (B::Hash, NumberFor), @@ -1565,18 +1448,6 @@ impl Protocol { self.sync.on_finality_proof_import(request_block, finalization_result) } - fn on_remote_call_response( - &mut self, - who: PeerId, - response: message::RemoteCallResponse - ) { - trace!(target: "sync", "Remote call response {} from {}", response.id, who); - self.light_dispatch.on_remote_call_response(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }, who, response); - } - fn on_remote_read_request( &mut self, who: PeerId, @@ -1590,11 +1461,11 @@ impl Protocol { } let keys_str = || match request.keys.len() { - 1 => request.keys[0].to_hex::(), + 1 => HexDisplay::from(&request.keys[0]).to_string(), _ => format!( "{}..{}", - request.keys[0].to_hex::(), - request.keys[request.keys.len() - 1].to_hex::(), + HexDisplay::from(&request.keys[0]), + HexDisplay::from(&request.keys[request.keys.len() - 1]), ), }; @@ -1618,6 +1489,7 @@ impl Protocol { }; self.send_message( &who, + None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, proof, @@ -1638,16 +1510,16 @@ impl Protocol { } let keys_str = || match request.keys.len() { - 1 => request.keys[0].to_hex::(), + 1 => HexDisplay::from(&request.keys[0]).to_string(), _ => format!( "{}..{}", - request.keys[0].to_hex::(), - request.keys[request.keys.len() - 1].to_hex::(), + HexDisplay::from(&request.keys[0]), + HexDisplay::from(&request.keys[request.keys.len() - 1]), ), }; trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", - request.id, who, request.storage_key.to_hex::(), keys_str(), request.block); + request.id, who, HexDisplay::from(&request.storage_key), keys_str(), request.block); let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { match self.context_data.chain.read_child_proof( &BlockId::Hash(request.block), @@ -1660,7 +1532,7 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", request.id, who, - request.storage_key.to_hex::(), + HexDisplay::from(&request.storage_key), keys_str(), request.block, error @@ -1672,7 +1544,7 @@ impl Protocol { trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", request.id, who, - request.storage_key.to_hex::(), + HexDisplay::from(&request.storage_key), keys_str(), request.block, "invalid child info and type", @@ -1682,6 +1554,7 @@ impl Protocol { }; self.send_message( &who, + None, GenericMessage::RemoteReadResponse(message::RemoteReadResponse { id: request.id, proof, @@ -1689,18 +1562,6 @@ impl Protocol { ); } - fn on_remote_read_response( - &mut self, - who: PeerId, - response: message::RemoteReadResponse - ) { - trace!(target: "sync", "Remote read response {} from {}", response.id, who); - self.light_dispatch.on_remote_read_response(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }, who, response); - } - fn on_remote_header_request( &mut self, who: PeerId, @@ -1722,6 +1583,7 @@ impl Protocol { }; self.send_message( &who, + None, GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { id: request.id, header, @@ -1730,18 +1592,6 @@ impl Protocol { ); } - fn on_remote_header_response( - &mut self, - who: PeerId, - response: message::RemoteHeaderResponse, - ) { - 
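The `HexDisplay` wrapper that replaces `rustc_hex::ToHex` in the log lines above is essentially a lazy hex formatter. A minimal stand-alone analogue, not `sp_core`'s implementation:

use std::fmt;

// Zero-copy wrapper that hex-encodes the bytes only when actually formatted.
struct HexBytes<'a>(&'a [u8]);

impl fmt::Display for HexBytes<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        for byte in self.0 {
            write!(f, "{:02x}", byte)?;
        }
        Ok(())
    }
}

// e.g. format!("{}", HexBytes(&[0xde, 0xad])) == "dead"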
trace!(target: "sync", "Remote header proof response {} from {}", response.id, who); - self.light_dispatch.on_remote_header_response(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }, who, response); - } - fn on_remote_changes_request( &mut self, who: PeerId, @@ -1751,9 +1601,9 @@ impl Protocol { request.id, who, if let Some(sk) = request.storage_key.as_ref() { - format!("{} : {}", sk.to_hex::(), request.key.to_hex::()) + format!("{} : {}", HexDisplay::from(sk), HexDisplay::from(&request.key)) } else { - request.key.to_hex::() + HexDisplay::from(&request.key).to_string() }, request.first, request.last @@ -1774,9 +1624,9 @@ impl Protocol { request.id, who, if let Some(sk) = storage_key { - format!("{} : {}", sk.0.to_hex::(), key.0.to_hex::()) + format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0)) } else { - key.0.to_hex::() + HexDisplay::from(&key.0).to_string() }, request.first, request.last, @@ -1792,6 +1642,7 @@ impl Protocol { }; self.send_message( &who, + None, GenericMessage::RemoteChangesResponse(message::RemoteChangesResponse { id: request.id, max: proof.max_block, @@ -1802,22 +1653,6 @@ impl Protocol { ); } - fn on_remote_changes_response( - &mut self, - who: PeerId, - response: message::RemoteChangesResponse, B::Hash>, - ) { - trace!(target: "sync", "Remote changes proof response {} from {} (max={})", - response.id, - who, - response.max - ); - self.light_dispatch.on_remote_changes_response(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }, who, response); - } - fn on_finality_proof_request( &mut self, who: PeerId, @@ -1842,6 +1677,7 @@ impl Protocol { }; self.send_message( &who, + None, GenericMessage::FinalityProofResponse(message::FinalityProofResponse { id: 0, block: request.block, @@ -1868,17 +1704,6 @@ impl Protocol { } } - fn on_remote_body_response( - &mut self, - peer: PeerId, - response: message::BlockResponse - ) { - self.light_dispatch.on_remote_body_response(LightDispatchIn { - behaviour: &mut self.behaviour, - peerset: self.peerset_handle.clone(), - }, peer, response); - } - fn format_stats(&self) -> String { let mut out = String::new(); for (id, stats) in &self.context_data.stats { @@ -1917,15 +1742,23 @@ impl Protocol { metrics.fork_targets.set(m.fork_targets.into()); metrics.queued_blocks.set(m.queued_blocks.into()); - metrics.justifications_pending.set(m.justifications.pending_requests.into()); - metrics.justifications_active.set(m.justifications.active_requests.into()); - metrics.justifications_failed.set(m.justifications.failed_requests.into()); - metrics.justifications_importing.set(m.justifications.importing_requests.into()); - - metrics.finality_proofs_pending.set(m.finality_proofs.pending_requests.into()); - metrics.finality_proofs_active.set(m.finality_proofs.active_requests.into()); - metrics.finality_proofs_failed.set(m.finality_proofs.failed_requests.into()); - metrics.finality_proofs_importing.set(m.finality_proofs.importing_requests.into()); + metrics.justifications.with_label_values(&["pending"]) + .set(m.justifications.pending_requests.into()); + metrics.justifications.with_label_values(&["active"]) + .set(m.justifications.active_requests.into()); + metrics.justifications.with_label_values(&["failed"]) + .set(m.justifications.failed_requests.into()); + metrics.justifications.with_label_values(&["importing"]) + .set(m.justifications.importing_requests.into()); + + metrics.finality_proofs.with_label_values(&["pending"]) + 
.set(m.finality_proofs.pending_requests.into()); + metrics.finality_proofs.with_label_values(&["active"]) + .set(m.finality_proofs.active_requests.into()); + metrics.finality_proofs.with_label_values(&["failed"]) + .set(m.finality_proofs.failed_requests.into()); + metrics.finality_proofs.with_label_values(&["importing"]) + .set(m.finality_proofs.importing_requests.into()); } } } @@ -1942,6 +1775,8 @@ pub enum CustomMessageOutcome { NotificationStreamClosed { remote: PeerId, protocols: Vec }, /// Messages have been received on one or more notifications protocols. NotificationsReceived { remote: PeerId, messages: Vec<(ConsensusEngineId, Bytes)> }, + /// Peer has a reported a new head of chain. + PeerNewBest(PeerId, NumberFor), None, } @@ -1963,20 +1798,25 @@ fn send_request( peer.block_request = Some((Instant::now(), r.clone())); } } - send_message::(behaviour, stats, who, message) + send_message::(behaviour, stats, who, None, message) } fn send_message( behaviour: &mut GenericProto, stats: &mut HashMap<&'static str, PacketStats>, who: &PeerId, - message: Message, + message: Option<(Cow<'static, [u8]>, Vec)>, + legacy_message: Message, ) { - let encoded = message.encode(); - let mut stats = stats.entry(message.id()).or_default(); + let encoded = legacy_message.encode(); + let mut stats = stats.entry(legacy_message.id()).or_default(); stats.bytes_out += encoded.len() as u64; stats.count_out += 1; - behaviour.send_packet(who, encoded); + if let Some((proto, msg)) = message { + behaviour.write_notification(who, proto, msg, encoded); + } else { + behaviour.send_packet(who, encoded); + } } impl NetworkBehaviour for Protocol { @@ -1991,20 +1831,29 @@ impl NetworkBehaviour for Protocol { self.behaviour.addresses_of_peer(peer_id) } - fn inject_connected(&mut self, peer_id: PeerId, endpoint: ConnectedPoint) { - self.behaviour.inject_connected(peer_id, endpoint) + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.behaviour.inject_connection_established(peer_id, conn, endpoint) + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.behaviour.inject_connection_closed(peer_id, conn, endpoint) } - fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint) { - self.behaviour.inject_disconnected(peer_id, endpoint) + fn inject_connected(&mut self, peer_id: &PeerId) { + self.behaviour.inject_connected(peer_id) } - fn inject_node_event( + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.behaviour.inject_disconnected(peer_id) + } + + fn inject_event( &mut self, peer_id: PeerId, + connection: ConnectionId, event: <::Handler as ProtocolsHandler>::OutEvent, ) { - self.behaviour.inject_node_event(peer_id, event) + self.behaviour.inject_event(peer_id, connection, event) } fn poll( @@ -2017,6 +1866,10 @@ impl NetworkBehaviour for Protocol { Self::OutEvent > > { + if let Some(message) = self.pending_messages.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + } + while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { self.tick(); } @@ -2057,10 +1910,10 @@ impl NetworkBehaviour for Protocol { Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => ev, Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id }) => - return Poll::Ready(NetworkBehaviourAction::DialPeer { 
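// [Illustrative sketch, not part of this patch] The hunk above collapses the
// four per-state gauges (pending/active/failed/importing) into one metric
// addressed through `with_label_values(&["..."])`. The stand-in below uses a
// plain HashMap instead of the Prometheus client to show why a single labelled
// family can replace several fixed-name metrics; the metric name printed in
// main() is invented for the example.
use std::collections::HashMap;

#[derive(Default)]
struct GaugeFamily {
    values: HashMap<&'static str, u64>,
}

impl GaugeFamily {
    // Mirrors the `with_label_values(&["pending"]).set(..)` shape used above.
    fn with_label(&mut self, label: &'static str) -> &mut u64 {
        self.values.entry(label).or_insert(0)
    }
}

fn main() {
    let mut justifications = GaugeFamily::default();
    *justifications.with_label("pending") = 3;
    *justifications.with_label("active") = 1;
    *justifications.with_label("failed") = 0;
    *justifications.with_label("importing") = 2;

    for (state, value) in &justifications.values {
        println!("sync_justifications{{status=\"{}\"}} {}", state, value);
    }
}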
peer_id }), - Poll::Ready(NetworkBehaviourAction::SendEvent { peer_id, event }) => - return Poll::Ready(NetworkBehaviourAction::SendEvent { peer_id, event }), + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }), Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), }; @@ -2073,8 +1926,39 @@ impl NetworkBehaviour for Protocol { GenericProtoOut::CustomProtocolClosed { peer_id, .. } => { self.on_peer_disconnected(peer_id.clone()) }, - GenericProtoOut::CustomMessage { peer_id, message } => + GenericProtoOut::LegacyMessage { peer_id, message } => self.on_custom_message(peer_id, message), + GenericProtoOut::Notification { peer_id, protocol_name, message } => + match self.legacy_equiv_by_name.get(&protocol_name) { + Some(Fallback::Consensus(engine_id)) => { + CustomMessageOutcome::NotificationsReceived { + remote: peer_id, + messages: vec![(*engine_id, message.freeze())], + } + } + Some(Fallback::Transactions) => { + if let Ok(m) = message::Transactions::decode(&mut message.as_ref()) { + self.on_extrinsics(peer_id, m); + } else { + warn!(target: "sub-libp2p", "Failed to decode transactions list"); + } + CustomMessageOutcome::None + } + Some(Fallback::BlockAnnounce) => { + if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { + let outcome = self.on_block_announce(peer_id.clone(), announce); + self.update_peer_info(&peer_id); + outcome + } else { + warn!(target: "sub-libp2p", "Failed to decode block announce"); + CustomMessageOutcome::None + } + } + None => { + error!(target: "sub-libp2p", "Received notification from unknown protocol {:?}", protocol_name); + CustomMessageOutcome::None + } + } GenericProtoOut::Clogged { peer_id, messages } => { debug!(target: "sync", "{} clogging messages:", messages.len()); for msg in messages.into_iter().take(5) { @@ -2093,10 +1977,6 @@ impl NetworkBehaviour for Protocol { } } - fn inject_replaced(&mut self, peer_id: PeerId, closed_endpoint: ConnectedPoint, new_endpoint: ConnectedPoint) { - self.behaviour.inject_replaced(peer_id, closed_endpoint, new_endpoint) - } - fn inject_addr_reach_failure( &mut self, peer_id: Option<&PeerId>, @@ -2126,14 +2006,8 @@ impl NetworkBehaviour for Protocol { self.behaviour.inject_listener_error(id, err); } - fn inject_listener_closed(&mut self, id: ListenerId) { - self.behaviour.inject_listener_closed(id); - } -} - -impl DiscoveryNetBehaviour for Protocol { - fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { - self.behaviour.add_discovered_nodes(peer_ids) + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + self.behaviour.inject_listener_closed(id, reason); } } @@ -2146,8 +2020,7 @@ impl Drop for Protocol { #[cfg(test)] mod tests { use crate::PeerId; - use crate::protocol::light_dispatch::AlwaysBadChecker; - use crate::config::{EmptyTransactionPool, Roles}; + use crate::config::EmptyTransactionPool; use super::{CustomMessageOutcome, Protocol, ProtocolConfig}; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; @@ -2160,12 +2033,8 @@ mod tests { let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); let (mut protocol, _) = Protocol::::new( - 
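// [Illustrative sketch, not part of this patch] The new `Notification` arm
// above looks up the protocol name in `legacy_equiv_by_name` and routes the
// payload to the consensus, transactions or block-announce path. A reduced
// model of that routing table with std types only; the `Fallback` variants
// mirror the ones used above, the registry entries in main() are made up, and
// the decode steps are replaced by prints.
use std::collections::HashMap;

type ConsensusEngineId = [u8; 4];

enum Fallback {
    Consensus(ConsensusEngineId),
    Transactions,
    BlockAnnounce,
}

fn route(registry: &HashMap<Vec<u8>, Fallback>, protocol_name: &[u8], message: &[u8]) {
    match registry.get(protocol_name) {
        Some(Fallback::Consensus(engine_id)) => {
            println!("forward {} bytes to consensus engine {:?}", message.len(), engine_id)
        }
        Some(Fallback::Transactions) => {
            println!("decode {} bytes as a transactions list", message.len())
        }
        Some(Fallback::BlockAnnounce) => {
            println!("decode {} bytes as a block announce", message.len())
        }
        None => eprintln!(
            "Received notification from unknown protocol {:?}",
            String::from_utf8_lossy(protocol_name)
        ),
    }
}

fn main() {
    let mut registry = HashMap::new();
    registry.insert(b"/grandpa/1".to_vec(), Fallback::Consensus(*b"FRNK"));
    registry.insert(b"/transactions/1".to_vec(), Fallback::Transactions);
    registry.insert(b"/block-announces/1".to_vec(), Fallback::BlockAnnounce);

    route(&registry, b"/transactions/1", &[1, 2, 3]);
    route(&registry, b"/unknown/1", &[]);
}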
ProtocolConfig { - roles: Roles::FULL, - max_parallel_downloads: 10, - }, + ProtocolConfig::default(), client.clone(), - Arc::new(AlwaysBadChecker), Arc::new(EmptyTransactionPool), None, None, @@ -2175,10 +2044,12 @@ mod tests { out_peers: 10, bootnodes: Vec::new(), reserved_only: false, - reserved_nodes: Vec::new(), + priority_groups: Vec::new(), }, Box::new(DefaultBlockAnnounceValidator::new(client.clone())), - None + None, + Default::default(), + None, ).unwrap(); let dummy_peer_id = PeerId::random(); diff --git a/client/network/src/protocol/block_requests.rs b/client/network/src/protocol/block_requests.rs index 5a947c0b6b5854ce05e393ea563976bb85656592..6af5023d39fe6b20661a4883c0d8a47416091c4e 100644 --- a/client/network/src/protocol/block_requests.rs +++ b/client/network/src/protocol/block_requests.rs @@ -35,6 +35,7 @@ use libp2p::{ ConnectedPoint, Multiaddr, PeerId, + connection::ConnectionId, upgrade::{InboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, upgrade::{DeniedUpgrade, read_one, write_one} }, @@ -43,6 +44,7 @@ use libp2p::{ NetworkBehaviour, NetworkBehaviourAction, OneShotHandler, + OneShotHandlerConfig, PollParameters, SubstreamProtocol } @@ -257,20 +259,27 @@ where max_request_len: self.config.max_request_len, protocol: self.config.protocol.clone(), }; - OneShotHandler::new(SubstreamProtocol::new(p), self.config.inactivity_timeout) + let mut cfg = OneShotHandlerConfig::default(); + cfg.inactive_timeout = self.config.inactivity_timeout; + OneShotHandler::new(SubstreamProtocol::new(p), cfg) } fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { Vec::new() } - fn inject_connected(&mut self, _peer: PeerId, _info: ConnectedPoint) { + fn inject_connected(&mut self, _peer: &PeerId) { } - fn inject_disconnected(&mut self, _peer: &PeerId, _info: ConnectedPoint) { + fn inject_disconnected(&mut self, _peer: &PeerId) { } - fn inject_node_event(&mut self, peer: PeerId, Request(request, mut stream): Request) { + fn inject_event( + &mut self, + peer: PeerId, + connection: ConnectionId, + Request(request, mut stream): Request + ) { match self.on_block_request(&peer, &request) { Ok(res) => { log::trace!("enqueueing block response for peer {} with {} blocks", peer, res.blocks.len()); diff --git a/client/network/src/protocol/event.rs b/client/network/src/protocol/event.rs index 78490863be9c250ad78605f634c95670ab8bf9ce..637bf805b5024cb24e1694018e72940d327b8c47 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/src/protocol/event.rs @@ -17,7 +17,6 @@ //! Network event types. These are are not the part of the protocol, but rather //! events that happen on the network like DHT get/put results received. -use crate::config::Roles; use bytes::Bytes; use libp2p::core::PeerId; use libp2p::kad::record::Key; @@ -55,8 +54,8 @@ pub enum Event { remote: PeerId, /// The concerned protocol. Each protocol uses a different substream. engine_id: ConsensusEngineId, - /// Roles that the remote . - roles: Roles, + /// Role of the remote. + role: ObservedRole, }, /// Closed a substream with the given node. Always matches a corresponding previous @@ -76,3 +75,26 @@ pub enum Event { messages: Vec<(ConsensusEngineId, Bytes)>, }, } + +/// Role that the peer sent to us during the handshake, with the addition of what our local node +/// knows about that peer. +#[derive(Debug, Clone)] +pub enum ObservedRole { + /// Full node. + Full, + /// Light node. + Light, + /// When we are a validator node, this is a sentry that protects us. 
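// [Illustrative sketch, not part of this patch] The updated test above passes
// `ProtocolConfig::default()` instead of spelling out every field. The snippet
// below shows the general pattern: give the config struct a Default impl and
// use struct-update syntax to override individual knobs. Only
// `max_parallel_downloads` is taken from the surrounding code; the other field
// and the default values are invented for the example.
#[derive(Debug, Clone)]
struct ProtocolConfig {
    max_parallel_downloads: u32,
    announce_only_new_best: bool,
}

impl Default for ProtocolConfig {
    fn default() -> Self {
        ProtocolConfig {
            max_parallel_downloads: 5,
            announce_only_new_best: false,
        }
    }
}

fn main() {
    // Take the defaults wholesale, as the updated test does.
    let defaults = ProtocolConfig::default();

    // Or override just one knob and inherit the rest.
    let custom = ProtocolConfig {
        max_parallel_downloads: 10,
        ..ProtocolConfig::default()
    };

    println!("{:?} / {:?}", defaults, custom);
}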
+ OurSentry, + /// When we are a sentry node, this is the authority we are protecting. + OurGuardedAuthority, + /// Third-party authority. + Authority, +} + +impl ObservedRole { + /// Returns `true` for `ObservedRole::Light`. + pub fn is_light(&self) -> bool { + matches!(self, ObservedRole::Light) + } +} diff --git a/client/network/src/protocol/generic_proto.rs b/client/network/src/protocol/generic_proto.rs index f703287f386fdcf9aebcfc8d3fa3d0c5971c7eff..cf8434d8bceffc5140a55d23d4aae1fc4fc4fa43 100644 --- a/client/network/src/protocol/generic_proto.rs +++ b/client/network/src/protocol/generic_proto.rs @@ -21,6 +21,7 @@ //! network, then performs the Substrate protocol handling on top. pub use self::behaviour::{GenericProto, GenericProtoOut}; +pub use self::handler::LegacyConnectionKillError; mod behaviour; mod handler; diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index 727415baaf5bcd9a4b06ca847d9aeb6968f78ab1..e62edb373380efa548a510369db3397deb3bbc06 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -14,35 +14,40 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::{DiscoveryNetBehaviour, config::ProtocolId}; -use crate::protocol::message::generic::{Message as GenericMessage, ConsensusMessage}; +use crate::config::ProtocolId; use crate::protocol::generic_proto::handler::{NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn}; use crate::protocol::generic_proto::upgrade::RegisteredProtocol; use bytes::BytesMut; -use codec::Encode as _; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::core::{ConnectedPoint, Multiaddr, PeerId}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use libp2p::core::{ConnectedPoint, Multiaddr, PeerId, connection::ConnectionId}; +use libp2p::swarm::{ + DialPeerCondition, + NetworkBehaviour, + NetworkBehaviourAction, + NotifyHandler, + PollParameters +}; use log::{debug, error, trace, warn}; +use prometheus_endpoint::HistogramVec; use rand::distributions::{Distribution as _, Uniform}; use smallvec::SmallVec; -use sp_runtime::ConsensusEngineId; -use std::{borrow::Cow, collections::hash_map::Entry, cmp}; -use std::{error, mem, pin::Pin, str, time::Duration}; use std::task::{Context, Poll}; +use std::{borrow::Cow, cmp, collections::hash_map::Entry}; +use std::{error, mem, pin::Pin, str, time::Duration}; use wasm_timer::Instant; -/// Network behaviour that handles opening substreams for custom protocols with other nodes. +/// Network behaviour that handles opening substreams for custom protocols with other peers. /// /// ## Legacy vs new protocol /// /// The `GenericProto` behaves as following: /// -/// - Whenever a connection is established, we open a single substream (called "legay protocol" in -/// the source code). This substream name depends on the `protocol_id` and `versions` passed at -/// initialization. If the remote refuses this substream, we close the connection. +/// - Whenever a connection is established, we open a single substream (called "legacy protocol" in +/// the source code) on that connection. This substream name depends on the `protocol_id` and +/// `versions` passed at initialization. If the remote refuses this substream, we close the +/// connection. /// /// - For each registered protocol, we also open an additional substream for this protocol. 
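// [Illustrative sketch, not part of this patch] The new `ObservedRole` enum
// above replaces the raw roles bitfield in network events. A standalone copy of
// the enum to show how the `matches!`-based helpers read at a call site; the
// extra `is_authority` helper is hypothetical and not part of the patch.
#[derive(Debug, Clone)]
enum ObservedRole {
    Full,
    Light,
    OurSentry,
    OurGuardedAuthority,
    Authority,
}

impl ObservedRole {
    // Returns `true` for `ObservedRole::Light`, as in the patch.
    fn is_light(&self) -> bool {
        matches!(self, ObservedRole::Light)
    }

    // Hypothetical helper: treat sentries and guarded authorities as
    // authority-like peers.
    fn is_authority(&self) -> bool {
        matches!(
            self,
            ObservedRole::Authority | ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority
        )
    }
}

fn main() {
    let role = ObservedRole::OurSentry;
    println!("light: {}, authority-like: {}", role.is_light(), role.is_authority());
}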
If the /// remote refuses this substream, then it's fine. @@ -57,34 +62,58 @@ use wasm_timer::Instant; /// /// - The libp2p swarm that opens new connections and reports disconnects. /// - The connection handler (see `handler.rs`) that handles individual connections. -/// - The peerset manager (PSM) that requests links to nodes to be established or broken. +/// - The peerset manager (PSM) that requests links to peers to be established or broken. /// - The external API, that requires knowledge of the links that have been established. /// /// Each connection handler can be in four different states: Enabled+Open, Enabled+Closed, /// Disabled+Open, or Disabled+Closed. The Enabled/Disabled component must be in sync with the /// peerset manager. For example, if the peerset manager requires a disconnection, we disable the -/// existing handler. The Open/Closed component must be in sync with the external API. +/// connection handlers of that peer. The Open/Closed component must be in sync with the external +/// API. +/// +/// However, a connection handler for a peer only exists if we are actually connected to that peer. +/// What this means is that there are six possible states for each peer: Disconnected, Dialing +/// (trying to connect), Enabled+Open, Enabled+Closed, Disabled+Open, Disabled+Closed. +/// Most notably, the Dialing state must correspond to a "link established" state in the peerset +/// manager. In other words, the peerset manager doesn't differentiate whether we are dialing a +/// peer or connected to it. /// -/// However a connection handler only exists if we are actually connected to a node. What this -/// means is that there are six possible states for each node: Disconnected, Dialing (trying to -/// reach it), Enabled+Open, Enabled+Closed, Disabled+open, Disabled+Closed. Most notably, the -/// Dialing state must correspond to a "link established" state in the peerset manager. In other -/// words, the peerset manager doesn't differentiate whether we are dialing a node or connected -/// to it. +/// There may be multiple connections to a peer. However, the status of a peer on +/// the API of this behaviour and towards the peerset manager is aggregated in +/// the following way: /// -/// Additionally, there also exists a "banning" system. If we fail to dial a node, we "ban" it for -/// a few seconds. If the PSM requests a node that is in the "banned" state, then we delay the -/// actual dialing attempt until after the ban expires, but the PSM will still consider the link -/// to be established. -/// Note that this "banning" system is not an actual ban. If a "banned" node tries to connect to -/// us, we accept the connection. The "banning" system is only about delaying dialing attempts. +/// 1. The enabled/disabled status is the same across all connections, as +/// decided by the peerset manager. +/// 2. `send_packet` and `write_notification` always send all data over +/// the same connection to preserve the ordering provided by the transport, +/// as long as that connection is open. If it closes, a second open +/// connection may take over, if one exists, but that case should be no +/// different than a single connection failing and being re-established +/// in terms of potential reordering and dropped messages. Messages can +/// be received on any connection. +/// 3. The behaviour reports `GenericProtoOut::CustomProtocolOpen` when the +/// first connection reports `NotifsHandlerOut::Open`. +/// 4. 
The behaviour reports `GenericProtoOut::CustomProtocolClosed` when the +/// last connection reports `NotifsHandlerOut::Closed`. +/// +/// In this way, the number of actual established connections to the peer is +/// an implementation detail of this behaviour. Note that, in practice and at +/// the time of this writing, there may be at most two connections to a peer +/// and only as a result of simultaneous dialing. However, the implementation +/// accommodates for any number of connections. +/// +/// Additionally, there also exists a "banning" system. If we fail to dial a peer, we "ban" it for +/// a few seconds. If the PSM requests connecting to a peer that is currently "banned", the next +/// dialing attempt is delayed until after the ban expires. However, the PSM will still consider +/// the peer to be connected. This "ban" is thus not a ban in a strict sense: If a "banned" peer +/// tries to connect, the connection is accepted. A ban only delays dialing attempts. /// pub struct GenericProto { /// Legacy protocol to open with peers. Never modified. legacy_protocol: RegisteredProtocol, /// Notification protocols. Entries are only ever added and not removed. - notif_protocols: Vec<(Cow<'static, [u8]>, ConsensusEngineId, Vec)>, + notif_protocols: Vec<(Cow<'static, [u8]>, Vec)>, /// Receiver for instructions about who to connect to or disconnect from. peerset: sc_peerset::Peerset, @@ -102,6 +131,9 @@ pub struct GenericProto { /// Events to produce from `poll()`. events: SmallVec<[NetworkBehaviourAction; 4]>, + + /// If `Some`, report the message queue sizes on this `Histogram`. + queue_size_report: Option, } /// State of a peer we're connected to. @@ -112,14 +144,14 @@ enum PeerState { /// the state machine code. Poisoned, - /// The peer misbehaved. If the PSM wants us to connect to this node, we will add an artificial + /// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial /// delay to the connection. Banned { - /// Until when the node is banned. + /// Until when the peer is banned. until: Instant, }, - /// The peerset requested that we connect to this peer. We are not connected to this node. + /// The peerset requested that we connect to this peer. We are currently not connected. PendingRequest { /// When to actually start dialing. timer: futures_timer::Delay, @@ -130,16 +162,13 @@ enum PeerState { /// The peerset requested that we connect to this peer. We are currently dialing this peer. Requested, - /// We are connected to this peer but the peerset refused it. This peer can still perform - /// Kademlia queries and such, but should get disconnected in a few seconds. + /// We are connected to this peer but the peerset refused it. + /// + /// We may still have ongoing traffic with that peer, but it should cease shortly. Disabled { - /// How we are connected to this peer. - connected_point: ConnectedPoint, - /// If true, we still have a custom protocol open with it. It will likely get closed in - /// a short amount of time, but we need to keep the information in order to not have a - /// state mismatch. - open: bool, - /// If `Some`, the node is banned until the given `Instant`. + /// The connections that are currently open for custom protocol traffic. + open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. banned_until: Option, }, @@ -147,12 +176,8 @@ enum PeerState { /// will be enabled when `timer` fires. 
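// [Illustrative sketch, not part of this patch] The comment block above
// stresses that a "ban" only delays our own dialing attempts and never rejects
// inbound connections. A reduced model of that bookkeeping with std types only;
// `PeerId` is just a string here and the timing logic is simplified compared to
// the real `PeerState::Banned`/`PendingRequest` states.
use std::collections::HashMap;
use std::time::{Duration, Instant};

type PeerId = String;

#[derive(Default)]
struct BanList {
    banned_until: HashMap<PeerId, Instant>,
}

impl BanList {
    fn ban_for(&mut self, peer: PeerId, dur: Duration) {
        self.banned_until.insert(peer, Instant::now() + dur);
    }

    // How long an outgoing dial to `peer` should be delayed, if at all.
    fn dial_delay(&self, peer: &PeerId) -> Option<Duration> {
        let until = self.banned_until.get(peer)?;
        until.checked_duration_since(Instant::now())
    }

    // Inbound connections are never refused because of a ban.
    fn accept_inbound(&self, _peer: &PeerId) -> bool {
        true
    }
}

fn main() {
    let mut bans = BanList::default();
    bans.ban_for("peer-a".to_string(), Duration::from_secs(5));
    println!("delay dial by {:?}", bans.dial_delay(&"peer-a".to_string()));
    println!("accept inbound: {}", bans.accept_inbound(&"peer-a".to_string()));
}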
This peer can still perform Kademlia queries and such, /// but should get disconnected in a few seconds. DisabledPendingEnable { - /// How we are connected to this peer. - connected_point: ConnectedPoint, - /// If true, we still have a custom protocol open with it. It will likely get closed in - /// a short amount of time, but we need to keep the information in order to not have a - /// state mismatch. - open: bool, + /// The connections that are currently open for custom protocol traffic. + open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, /// When to enable this remote. timer: futures_timer::Delay, /// When the `timer` will trigger. @@ -162,33 +187,41 @@ enum PeerState { /// We are connected to this peer and the peerset has accepted it. The handler is in the /// enabled state. Enabled { - /// How we are connected to this peer. - connected_point: ConnectedPoint, - /// If true, we have a custom protocol open with this peer. - open: bool, + /// The connections that are currently open for custom protocol traffic. + open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, }, - /// We are connected to this peer, and we sent an incoming message to the peerset. The handler - /// is in initialization mode. We are waiting for the Accept or Reject from the peerset. There - /// is a corresponding entry in `incoming`. - Incoming { - /// How we are connected to this peer. - connected_point: ConnectedPoint, - }, + /// We received an incoming connection from this peer and forwarded that + /// connection request to the peerset. The connection handlers are waiting + /// for initialisation, i.e. to be enabled or disabled based on whether + /// the peerset accepts or rejects the peer. + Incoming, } impl PeerState { - /// True if we have an open channel with that node. + /// True if there exists an established connection to tbe peer + /// that is open for custom protocol traffic. fn is_open(&self) -> bool { + self.get_open().is_some() + } + + /// Returns the connection ID of the first established connection + /// that is open for custom protocol traffic. + fn get_open(&self) -> Option { match self { - PeerState::Poisoned => false, - PeerState::Banned { .. } => false, - PeerState::PendingRequest { .. } => false, - PeerState::Requested => false, - PeerState::Disabled { open, .. } => *open, - PeerState::DisabledPendingEnable { open, .. } => *open, - PeerState::Enabled { open, .. } => *open, - PeerState::Incoming { .. } => false, + PeerState::Disabled { open, .. } | + PeerState::DisabledPendingEnable { open, .. } | + PeerState::Enabled { open, .. } => + if !open.is_empty() { + Some(open[0]) + } else { + None + } + PeerState::Poisoned => None, + PeerState::Banned { .. } => None, + PeerState::PendingRequest { .. } => None, + PeerState::Requested => None, + PeerState::Incoming { .. } => None, } } @@ -210,7 +243,7 @@ impl PeerState { /// State of an "incoming" message sent to the peer set manager. #[derive(Debug)] struct IncomingPeer { - /// Id of the node that is concerned. + /// Id of the remote peer of the incoming connection. peer_id: PeerId, /// If true, this "incoming" still corresponds to an actual connection. If false, then the /// connection corresponding to it has been closed or replaced already. @@ -224,10 +257,8 @@ struct IncomingPeer { pub enum GenericProtoOut { /// Opened a custom protocol with the remote. CustomProtocolOpen { - /// Id of the node we have opened a connection with. + /// Id of the peer we are connected to. 
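// [Illustrative sketch, not part of this patch] `PeerState` above now tracks a
// list of connection IDs that are open for custom protocol traffic, and
// `get_open()` picks the first one so outgoing traffic keeps using a single
// connection while it lives. A standalone model of that aggregation;
// `ConnectionId` is a plain integer here instead of the libp2p type.
type ConnectionId = u32;

enum PeerState {
    Disconnected,
    Enabled { open: Vec<ConnectionId> },
}

impl PeerState {
    // First connection that is open for custom protocol traffic, if any.
    fn get_open(&self) -> Option<ConnectionId> {
        match self {
            PeerState::Enabled { open } => open.first().copied(),
            PeerState::Disconnected => None,
        }
    }

    // True if at least one connection is open, mirroring `is_open()` above.
    fn is_open(&self) -> bool {
        self.get_open().is_some()
    }
}

fn main() {
    let peer = PeerState::Enabled { open: vec![7, 9] };
    // Both connections are established, but traffic is pinned to the first.
    assert_eq!(peer.get_open(), Some(7));
    assert!(peer.is_open());
    assert!(!PeerState::Disconnected.is_open());
}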
peer_id: PeerId, - /// Endpoint used for this custom protocol. - endpoint: ConnectedPoint, }, /// Closed a custom protocol with the remote. @@ -238,12 +269,22 @@ pub enum GenericProtoOut { reason: Cow<'static, str>, }, + /// Receives a message on the legacy substream. + LegacyMessage { + /// Id of the peer the message came from. + peer_id: PeerId, + /// Message that has been received. + message: BytesMut, + }, + /// Receives a message on a custom protocol substream. /// /// Also concerns received notifications for the notifications API. - CustomMessage { + Notification { /// Id of the peer the message came from. peer_id: PeerId, + /// Engine corresponding to the message. + protocol_name: Cow<'static, [u8]>, /// Message that has been received. message: BytesMut, }, @@ -260,10 +301,14 @@ pub enum GenericProtoOut { impl GenericProto { /// Creates a `CustomProtos`. + /// + /// The `queue_size_report` is an optional Prometheus metric that can report the size of the + /// messages queue. If passed, it must have one label for the protocol name. pub fn new( protocol: impl Into, versions: &[u8], peerset: sc_peerset::Peerset, + queue_size_report: Option, ) -> Self { let legacy_protocol = RegisteredProtocol::new(protocol, versions); @@ -275,6 +320,7 @@ impl GenericProto { incoming: SmallVec::new(), next_incoming_index: sc_peerset::IncomingIndex(0), events: SmallVec::new(), + queue_size_report, } } @@ -285,10 +331,9 @@ impl GenericProto { pub fn register_notif_protocol( &mut self, protocol_name: impl Into>, - engine_id: ConsensusEngineId, handshake_msg: impl Into> ) { - self.notif_protocols.push((protocol_name.into(), engine_id, handshake_msg.into())); + self.notif_protocols.push((protocol_name.into(), handshake_msg.into())); } /// Returns the number of discovered nodes that we keep in memory. @@ -301,7 +346,7 @@ impl GenericProto { self.peers.iter().filter(|(_, state)| state.is_open()).map(|(id, _)| id) } - /// Returns true if we have a channel open with this node. + /// Returns true if we have an open connection to the given peer. pub fn is_open(&self, peer_id: &PeerId) -> bool { self.peers.get(peer_id).map(|p| p.is_open()).unwrap_or(false) } @@ -312,8 +357,8 @@ impl GenericProto { self.disconnect_peer_inner(peer_id, None); } - /// Inner implementation of `disconnect_peer`. If `ban` is `Some`, we ban the node for the - /// specific duration. + /// Inner implementation of `disconnect_peer`. If `ban` is `Some`, we ban the peer + /// for the specific duration. fn disconnect_peer_inner(&mut self, peer_id: &PeerId, ban: Option) { let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) { entry @@ -329,7 +374,11 @@ impl GenericProto { st @ PeerState::Banned { .. } => *entry.into_mut() = st, // DisabledPendingEnable => Disabled. - PeerState::DisabledPendingEnable { open, connected_point, timer_deadline, .. } => { + PeerState::DisabledPendingEnable { + open, + timer_deadline, + timer: _ + } => { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); self.peerset.dropped(peer_id.clone()); let banned_until = Some(if let Some(ban) = ban { @@ -337,24 +386,31 @@ impl GenericProto { } else { timer_deadline }); - *entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until } + *entry.into_mut() = PeerState::Disabled { + open, + banned_until + } }, // Enabled => Disabled. 
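// [Illustrative sketch, not part of this patch] `GenericProtoOut` above now
// distinguishes `LegacyMessage` (the old single substream) from `Notification`,
// which carries the protocol name of the notifications substream it arrived on.
// A minimal consumer of such an event enum; the types are simplified (plain
// Vec<u8> instead of BytesMut, string peer ids).
use std::borrow::Cow;

type PeerId = String;

enum GenericProtoOut {
    LegacyMessage { peer_id: PeerId, message: Vec<u8> },
    Notification {
        peer_id: PeerId,
        protocol_name: Cow<'static, [u8]>,
        message: Vec<u8>,
    },
}

fn handle(event: GenericProtoOut) {
    match event {
        GenericProtoOut::LegacyMessage { peer_id, message } => {
            // Old-style messages still go through the legacy decode path.
            println!("legacy message from {} ({} bytes)", peer_id, message.len());
        }
        GenericProtoOut::Notification { peer_id, protocol_name, message } => {
            // New-style messages are routed by protocol name, as in the
            // `legacy_equiv_by_name` lookup earlier in this diff.
            println!(
                "notification from {} on {:?} ({} bytes)",
                peer_id,
                String::from_utf8_lossy(&protocol_name),
                message.len()
            );
        }
    }
}

fn main() {
    handle(GenericProtoOut::LegacyMessage { peer_id: "a".into(), message: vec![1, 2, 3] });
    handle(GenericProtoOut::Notification {
        peer_id: "b".into(),
        protocol_name: Cow::Borrowed(&b"/transactions/1"[..]),
        message: vec![4, 5],
    });
}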
- PeerState::Enabled { open, connected_point } => { + PeerState::Enabled { open } => { debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); self.peerset.dropped(peer_id.clone()); debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), + handler: NotifyHandler::All, event: NotifsHandlerIn::Disable, }); let banned_until = ban.map(|dur| Instant::now() + dur); - *entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until } + *entry.into_mut() = PeerState::Disabled { + open, + banned_until + } }, // Incoming => Disabled. - PeerState::Incoming { connected_point, .. } => { + PeerState::Incoming => { let inc = if let Some(inc) = self.incoming.iter_mut() .find(|i| i.peer_id == *entry.key() && i.alive) { inc @@ -366,12 +422,16 @@ impl GenericProto { inc.alive = false; debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), + handler: NotifyHandler::All, event: NotifsHandlerIn::Disable, }); let banned_until = ban.map(|dur| Instant::now() + dur); - *entry.into_mut() = PeerState::Disabled { open: false, connected_point, banned_until } + *entry.into_mut() = PeerState::Disabled { + open: SmallVec::new(), + banned_until + } }, PeerState::Poisoned => @@ -399,6 +459,16 @@ impl GenericProto { } } + /// Notify the behaviour that we have learned about the existence of nodes. + /// + /// Can be called multiple times with the same `PeerId`s. + pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { + self.peerset.discovered(peer_ids.into_iter().map(|peer_id| { + debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); + peer_id + })); + } + /// Sends a notification to a peer. /// /// Has no effect if the custom protocol is not open with the given peer. @@ -406,32 +476,40 @@ impl GenericProto { /// Also note that even if we have a valid open substream, it may in fact be already closed /// without us knowing, in which case the packet will not be received. /// - /// > **Note**: Ideally the `engine_id` parameter wouldn't be necessary. See the documentation - /// > of [`NotifsHandlerIn`] for more information. + /// The `fallback` parameter is used for backwards-compatibility reason if the remote doesn't + /// support our protocol. One needs to pass the equivalent of what would have been passed + /// with `send_packet`. 
pub fn write_notification( &mut self, target: &PeerId, - engine_id: ConsensusEngineId, protocol_name: Cow<'static, [u8]>, message: impl Into>, + encoded_fallback_message: Vec, ) { - if !self.is_open(target) { - return; - } + let conn = match self.peers.get(target).and_then(|p| p.get_open()) { + None => { + debug!(target: "sub-libp2p", + "Tried to sent notification to {:?} without an open channel.", + target); + return + }, + Some(conn) => conn + }; trace!( target: "sub-libp2p", - "External API => Notification for {:?} with protocol {:?}", + "External API => Notification({:?}, {:?})", target, str::from_utf8(&protocol_name) ); trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - self.events.push(NetworkBehaviourAction::SendEvent { + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: target.clone(), + handler: NotifyHandler::One(conn), event: NotifsHandlerIn::SendNotification { message: message.into(), - engine_id, + encoded_fallback_message, protocol_name, }, }); @@ -444,14 +522,21 @@ impl GenericProto { /// Also note that even we have a valid open substream, it may in fact be already closed /// without us knowing, in which case the packet will not be received. pub fn send_packet(&mut self, target: &PeerId, message: Vec) { - if !self.is_open(target) { - return; - } + let conn = match self.peers.get(target).and_then(|p| p.get_open()) { + None => { + debug!(target: "sub-libp2p", + "Tried to sent packet to {:?} without an open channel.", + target); + return + } + Some(conn) => conn + }; trace!(target: "sub-libp2p", "External API => Packet for {:?}", target); trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - self.events.push(NetworkBehaviourAction::SendEvent { + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: target.clone(), + handler: NotifyHandler::One(conn), event: NotifsHandlerIn::SendLegacy { message, } @@ -463,7 +548,7 @@ impl GenericProto { self.peerset.debug_info() } - /// Function that is called when the peerset wants us to connect to a node. + /// Function that is called when the peerset wants us to connect to a peer. fn peerset_report_connect(&mut self, peer_id: PeerId) { let mut occ_entry = match self.peers.entry(peer_id) { Entry::Occupied(entry) => entry, @@ -471,7 +556,10 @@ impl GenericProto { // If there's no entry in `self.peers`, start dialing. debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", entry.key()); debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", entry.key()); - self.events.push(NetworkBehaviourAction::DialPeer { peer_id: entry.key().clone() }); + self.events.push(NetworkBehaviourAction::DialPeer { + peer_id: entry.key().clone(), + condition: DialPeerCondition::Disconnected + }); entry.insert(PeerState::Requested); return; } @@ -492,36 +580,41 @@ impl GenericProto { PeerState::Banned { .. 
} => { debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); - self.events.push(NetworkBehaviourAction::DialPeer { peer_id: occ_entry.key().clone() }); + self.events.push(NetworkBehaviourAction::DialPeer { + peer_id: occ_entry.key().clone(), + condition: DialPeerCondition::Disconnected + }); *occ_entry.into_mut() = PeerState::Requested; }, - PeerState::Disabled { open, ref connected_point, banned_until: Some(ref banned) } - if *banned > now => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Has idle connection through \ - {:?} but node is banned until {:?}", occ_entry.key(), connected_point, banned); + PeerState::Disabled { + open, + banned_until: Some(ref banned) + } if *banned > now => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is banned until {:?}", + occ_entry.key(), banned); *occ_entry.into_mut() = PeerState::DisabledPendingEnable { - connected_point: connected_point.clone(), open, timer: futures_timer::Delay::new(banned.clone() - now), timer_deadline: banned.clone(), }; }, - PeerState::Disabled { open, connected_point, banned_until: _ } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling previously-idle \ - connection through {:?}", occ_entry.key(), connected_point); + PeerState::Disabled { open, banned_until: _ } => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", + occ_entry.key()); debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push(NetworkBehaviourAction::SendEvent { + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: occ_entry.key().clone(), + handler: NotifyHandler::All, event: NotifsHandlerIn::Enable, }); - *occ_entry.into_mut() = PeerState::Enabled { connected_point, open }; + *occ_entry.into_mut() = PeerState::Enabled { open }; }, - PeerState::Incoming { connected_point, .. } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling incoming \ - connection through {:?}", occ_entry.key(), connected_point); + PeerState::Incoming => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", + occ_entry.key()); if let Some(inc) = self.incoming.iter_mut() .find(|i| i.peer_id == *occ_entry.key() && i.alive) { inc.alive = false; @@ -530,26 +623,30 @@ impl GenericProto { incoming for incoming peer") } debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push(NetworkBehaviourAction::SendEvent { + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: occ_entry.key().clone(), + handler: NotifyHandler::All, event: NotifsHandlerIn::Enable, }); - *occ_entry.into_mut() = PeerState::Enabled { connected_point, open: false }; + *occ_entry.into_mut() = PeerState::Enabled { open: SmallVec::new() }; }, st @ PeerState::Enabled { .. } => { - warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already connected to this \ - peer", occ_entry.key()); + warn!(target: "sub-libp2p", + "PSM => Connect({:?}): Already connected.", + occ_entry.key()); *occ_entry.into_mut() = st; }, st @ PeerState::DisabledPendingEnable { .. } => { - warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already have an idle \ - connection to this peer and waiting to enable it", occ_entry.key()); + warn!(target: "sub-libp2p", + "PSM => Connect({:?}): Already pending enabling.", + occ_entry.key()); *occ_entry.into_mut() = st; }, st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. 
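// [Illustrative sketch, not part of this patch] Both send paths above now
// resolve the first open connection up front and bail out with a debug log if
// there is none, instead of only checking `is_open()`. A simplified version of
// that guard, reusing the toy ConnectionId shape from the earlier sketches; the
// log macros are replaced by eprintln!.
use std::collections::HashMap;

type PeerId = String;
type ConnectionId = u32;

struct Peers {
    open: HashMap<PeerId, Vec<ConnectionId>>,
}

impl Peers {
    fn get_open(&self, peer: &PeerId) -> Option<ConnectionId> {
        self.open.get(peer).and_then(|conns| conns.first().copied())
    }

    fn send_packet(&mut self, target: &PeerId, message: Vec<u8>) {
        // Mirrors the new guard: no open connection means no send, plus a log.
        let conn = match self.get_open(target) {
            None => {
                eprintln!("Tried to send packet to {:?} without an open channel.", target);
                return;
            }
            Some(conn) => conn,
        };
        println!("sending {} bytes to {:?} on connection {}", message.len(), target, conn);
    }
}

fn main() {
    let mut peers = Peers { open: HashMap::new() };
    peers.send_packet(&"peer-a".to_string(), vec![1, 2, 3]); // logged and dropped
    peers.open.insert("peer-a".to_string(), vec![4]);
    peers.send_packet(&"peer-a".to_string(), vec![1, 2, 3]); // goes to connection 4
}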
} => { - warn!(target: "sub-libp2p", "PSM => Connect({:?}): Received a previous \ - request for that peer", occ_entry.key()); + warn!(target: "sub-libp2p", + "PSM => Connect({:?}): Duplicate request.", + occ_entry.key()); *occ_entry.into_mut() = st; }, @@ -558,55 +655,63 @@ impl GenericProto { } } - /// Function that is called when the peerset wants us to disconnect from a node. + /// Function that is called when the peerset wants us to disconnect from a peer. fn peerset_report_disconnect(&mut self, peer_id: PeerId) { let mut entry = match self.peers.entry(peer_id) { Entry::Occupied(entry) => entry, Entry::Vacant(entry) => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Node already disabled", entry.key()); + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); return } }; match mem::replace(entry.get_mut(), PeerState::Poisoned) { st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Node already disabled", entry.key()); + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); *entry.into_mut() = st; }, - PeerState::DisabledPendingEnable { open, connected_point, timer_deadline, .. } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Interrupting pending \ - enable", entry.key()); + PeerState::DisabledPendingEnable { + open, + timer_deadline, + timer: _ + } => { + debug!(target: "sub-libp2p", + "PSM => Drop({:?}): Interrupting pending enabling.", + entry.key()); *entry.into_mut() = PeerState::Disabled { open, - connected_point, banned_until: Some(timer_deadline), }; }, - PeerState::Enabled { open, connected_point } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connection", entry.key()); + PeerState::Enabled { open } => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); - self.events.push(NetworkBehaviourAction::SendEvent { + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: entry.key().clone(), + handler: NotifyHandler::All, event: NotifsHandlerIn::Disable, }); - *entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until: None } + *entry.into_mut() = PeerState::Disabled { + open, + banned_until: None + } }, - st @ PeerState::Incoming { .. } => { - error!(target: "sub-libp2p", "PSM => Drop({:?}): Was in incoming mode", + st @ PeerState::Incoming => { + error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", entry.key()); *entry.into_mut() = st; }, PeerState::Requested => { // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other - // sub-systems (such as the discovery mechanism) may require dialing this node as + // sub-systems (such as the discovery mechanism) may require dialing this peer as // well at the same time. - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Was not yet connected", entry.key()); + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected.", entry.key()); entry.remove(); }, PeerState::PendingRequest { timer_deadline, .. } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Was not yet connected", entry.key()); + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); *entry.into_mut() = PeerState::Banned { until: timer_deadline } }, @@ -615,7 +720,8 @@ impl GenericProto { } } - /// Function that is called when the peerset wants us to accept an incoming node. 
+ /// Function that is called when the peerset wants us to accept a connection + /// request from a peer. fn peerset_report_accept(&mut self, index: sc_peerset::IncomingIndex) { let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { self.incoming.remove(pos) @@ -632,34 +738,25 @@ impl GenericProto { return } - let state = if let Some(state) = self.peers.get_mut(&incoming.peer_id) { - state - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in peers \ - corresponding to an alive incoming"); - return - }; - - let connected_point = if let PeerState::Incoming { connected_point } = state { - connected_point.clone() - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: entry in peers corresponding \ - to an alive incoming is not in incoming state"); - return - }; - - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connection \ - through {:?}", index, incoming.peer_id, connected_point); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: incoming.peer_id, - event: NotifsHandlerIn::Enable, - }); - - *state = PeerState::Enabled { open: false, connected_point }; + match self.peers.get_mut(&incoming.peer_id) { + Some(state @ PeerState::Incoming) => { + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", + index, incoming.peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id, + handler: NotifyHandler::All, + event: NotifsHandlerIn::Enable, + }); + *state = PeerState::Enabled { open: SmallVec::new() }; + } + peer => error!(target: "sub-libp2p", + "State mismatch in libp2p: Expected alive incoming. Got {:?}.", + peer) + } } - /// Function that is called when the peerset wants us to reject an incoming node. + /// Function that is called when the peerset wants us to reject an incoming peer. 
fn peerset_report_reject(&mut self, index: sc_peerset::IncomingIndex) { let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { self.incoming.remove(pos) @@ -674,39 +771,25 @@ impl GenericProto { return } - let state = if let Some(state) = self.peers.get_mut(&incoming.peer_id) { - state - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in peers \ - corresponding to an alive incoming"); - return - }; - - let connected_point = if let PeerState::Incoming { connected_point } = state { - connected_point.clone() - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: entry in peers corresponding \ - to an alive incoming is not in incoming state"); - return - }; - - debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connection through \ - {:?}", index, incoming.peer_id, connected_point); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { - peer_id: incoming.peer_id, - event: NotifsHandlerIn::Disable, - }); - *state = PeerState::Disabled { open: false, connected_point, banned_until: None }; - } -} - -impl DiscoveryNetBehaviour for GenericProto { - fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { - self.peerset.discovered(peer_ids.into_iter().map(|peer_id| { - debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); - peer_id - })); + match self.peers.get_mut(&incoming.peer_id) { + Some(state @ PeerState::Incoming) => { + debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", + index, incoming.peer_id); + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id, + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + *state = PeerState::Disabled { + open: SmallVec::new(), + banned_until: None + }; + } + peer => error!(target: "sub-libp2p", + "State mismatch in libp2p: Expected alive incoming. Got {:?}.", + peer) + } } } @@ -715,33 +798,43 @@ impl NetworkBehaviour for GenericProto { type OutEvent = GenericProtoOut; fn new_handler(&mut self) -> Self::ProtocolsHandler { - NotifsHandlerProto::new(self.legacy_protocol.clone(), self.notif_protocols.clone()) + NotifsHandlerProto::new( + self.legacy_protocol.clone(), + self.notif_protocols.clone(), + self.queue_size_report.clone() + ) } fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { Vec::new() } - fn inject_connected(&mut self, peer_id: PeerId, connected_point: ConnectedPoint) { - match (self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned), connected_point) { - (st @ &mut PeerState::Requested, connected_point) | - (st @ &mut PeerState::PendingRequest { .. }, connected_point) => { - debug!(target: "sub-libp2p", "Libp2p => Connected({:?}): Connection \ - requested by PSM (through {:?})", peer_id, connected_point + fn inject_connected(&mut self, _: &PeerId) { + } + + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.", + conn, endpoint, peer_id); + match (self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned), endpoint) { + (st @ &mut PeerState::Requested, endpoint) | + (st @ &mut PeerState::PendingRequest { .. 
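// [Illustrative sketch, not part of this patch] `peerset_report_accept` and
// `peerset_report_reject` above both match on `Some(state @ PeerState::Incoming)`
// so the success arm both checks the expected state and keeps a mutable handle
// to overwrite it. A compact reproduction of that pattern on a toy state enum.
use std::collections::HashMap;

type PeerId = String;

#[derive(Debug)]
enum PeerState {
    Incoming,
    Enabled,
    Disabled,
}

fn report_accept(peers: &mut HashMap<PeerId, PeerState>, peer: &PeerId) {
    match peers.get_mut(peer) {
        // `state @ PeerState::Incoming` binds the &mut PeerState only when it
        // currently holds the Incoming variant.
        Some(state @ PeerState::Incoming) => {
            *state = PeerState::Enabled;
        }
        other => eprintln!("State mismatch: expected alive incoming, got {:?}", other),
    }
}

fn main() {
    let mut peers = HashMap::new();
    peers.insert("peer-a".to_string(), PeerState::Incoming);
    peers.insert("peer-b".to_string(), PeerState::Disabled);

    report_accept(&mut peers, &"peer-a".to_string()); // Incoming -> Enabled
    report_accept(&mut peers, &"peer-b".to_string()); // logged as a mismatch
    println!("{:?}", peers);
}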
}, endpoint) => { + debug!(target: "sub-libp2p", + "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", + peer_id, endpoint ); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { + *st = PeerState::Enabled { open: SmallVec::new() }; + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), - event: NotifsHandlerIn::Enable, + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Enable }); - *st = PeerState::Enabled { open: false, connected_point }; } - // Note: it may seem weird that "Banned" nodes get treated as if there were absent. + // Note: it may seem weird that "Banned" peers get treated as if they were absent. // This is because the word "Banned" means "temporarily prevent outgoing connections to - // this node", and not "banned" in the sense that we would refuse the node altogether. - (st @ &mut PeerState::Poisoned, connected_point @ ConnectedPoint::Listener { .. }) | - (st @ &mut PeerState::Banned { .. }, connected_point @ ConnectedPoint::Listener { .. }) => { + // this peer", and not "banned" in the sense that we would refuse the peer altogether. + (st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) | + (st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. }) => { let incoming_id = self.next_incoming_index.clone(); self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { Some(v) => v, @@ -750,61 +843,79 @@ impl NetworkBehaviour for GenericProto { return } }; - debug!(target: "sub-libp2p", "Libp2p => Connected({:?}): Incoming connection", - peer_id); - debug!(target: "sub-libp2p", "PSM <= Incoming({:?}, {:?}): Through {:?}", - incoming_id, peer_id, connected_point); + debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Incoming connection", + peer_id, endpoint); + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + peer_id, incoming_id); self.peerset.incoming(peer_id.clone(), incoming_id); self.incoming.push(IncomingPeer { peer_id: peer_id.clone(), alive: true, incoming_id, }); - *st = PeerState::Incoming { connected_point }; + *st = PeerState::Incoming { }; } - (st @ &mut PeerState::Poisoned, connected_point) | - (st @ &mut PeerState::Banned { .. }, connected_point) => { + (st @ &mut PeerState::Poisoned, endpoint) | + (st @ &mut PeerState::Banned { .. }, endpoint) => { let banned_until = if let PeerState::Banned { until } = st { Some(*until) } else { None }; - debug!(target: "sub-libp2p", "Libp2p => Connected({:?}): Requested by something \ - else than PSM, disabling", peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { + debug!(target: "sub-libp2p", + "Libp2p => Connected({},{:?}): Not requested by PSM, disabling.", + peer_id, endpoint); + *st = PeerState::Disabled { open: SmallVec::new(), banned_until }; + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), - event: NotifsHandlerIn::Disable, + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Disable }); - *st = PeerState::Disabled { open: false, connected_point, banned_until }; } - st => { - // This is a serious bug either in this state machine or in libp2p. - error!(target: "sub-libp2p", "Received inject_connected for \ - already-connected node; state is {:?}", st - ); + (PeerState::Incoming { .. 
}, _) => { + debug!(target: "sub-libp2p", + "Secondary connection {:?} to {} waiting for PSM decision.", + conn, peer_id); + }, + + (PeerState::Enabled { .. }, _) => { + debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection", + peer_id, conn); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Enable + }); + } + + (PeerState::Disabled { .. }, _) | (PeerState::DisabledPendingEnable { .. }, _) => { + debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection", + peer_id, conn); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Disable + }); } } } - fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint) { - match self.peers.remove(peer_id) { - None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) | - Some(PeerState::Banned { .. }) => - // This is a serious bug either in this state machine or in libp2p. - error!(target: "sub-libp2p", "Received inject_disconnected for non-connected \ - node {:?}", peer_id), - - Some(PeerState::Disabled { open, banned_until, .. }) => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was disabled \ - (through {:?})", peer_id, endpoint); - if let Some(until) = banned_until { - self.peers.insert(peer_id.clone(), PeerState::Banned { until }); - } - if open { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} closed.", + conn, endpoint, peer_id); + match self.peers.get_mut(peer_id) { + Some(PeerState::Disabled { open, .. }) | + Some(PeerState::DisabledPendingEnable { open, .. }) | + Some(PeerState::Enabled { open, .. }) => { + // Check if the "link" to the peer is already considered closed, + // i.e. there is no connection that is open for custom protocols, + // in which case `CustomProtocolClosed` was already emitted. + let closed = open.is_empty(); + open.retain(|c| c != conn); + if open.is_empty() && !closed { + debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); let event = GenericProtoOut::CustomProtocolClosed { peer_id: peer_id.clone(), reason: "Disconnected by libp2p".into(), @@ -813,52 +924,52 @@ impl NetworkBehaviour for GenericProto { self.events.push(NetworkBehaviourAction::GenerateEvent(event)); } } + _ => {} + } + } - Some(PeerState::DisabledPendingEnable { open, timer_deadline, .. }) => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was disabled \ - (through {:?}) but pending enable", peer_id, endpoint); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()); - self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); - if open { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - reason: "Disconnected by libp2p".into(), - }; + fn inject_disconnected(&mut self, peer_id: &PeerId) { + match self.peers.remove(peer_id) { + None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) | + Some(PeerState::Banned { .. }) => + // This is a serious bug either in this state machine or in libp2p. 
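// [Illustrative sketch, not part of this patch] `inject_connection_closed`
// above removes the closed connection from the peer's `open` list and emits
// `CustomProtocolClosed` only when that removal empties the list and the "link"
// was not already considered closed. The helper below isolates that
// bookkeeping; it returns true when the caller should report the link as closed.
type ConnectionId = u32;

fn on_connection_closed(open: &mut Vec<ConnectionId>, conn: ConnectionId) -> bool {
    // Was the link already considered closed before this event?
    let already_closed = open.is_empty();
    open.retain(|c| *c != conn);
    // Report only the transition from "some connection open" to "none open".
    open.is_empty() && !already_closed
}

fn main() {
    let mut open = vec![1, 2];
    assert!(!on_connection_closed(&mut open, 1)); // connection 2 still open
    assert!(on_connection_closed(&mut open, 2));  // last one gone: report Closed
    assert!(!on_connection_closed(&mut open, 3)); // already closed: no duplicate event
}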
+ error!(target: "sub-libp2p", + "`inject_disconnected` called for unknown peer {}", + peer_id), - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); + Some(PeerState::Disabled { banned_until, .. }) => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled.", peer_id); + if let Some(until) = banned_until { + self.peers.insert(peer_id.clone(), PeerState::Banned { until }); } } - Some(PeerState::Enabled { open, .. }) => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was enabled \ - (through {:?})", peer_id, endpoint); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + Some(PeerState::DisabledPendingEnable { timer_deadline, .. }) => { + debug!(target: "sub-libp2p", + "Libp2p => Disconnected({}): Was disabled but pending enable.", + peer_id); + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); self.peerset.dropped(peer_id.clone()); + self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); + } + Some(PeerState::Enabled { .. }) => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was enabled.", peer_id); + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); + self.peerset.dropped(peer_id.clone()); let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); self.peers.insert(peer_id.clone(), PeerState::Banned { until: Instant::now() + Duration::from_secs(ban_dur) }); - - if open { - debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - reason: "Disconnected by libp2p".into(), - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } } // In the incoming state, we don't report "Dropped". Instead we will just ignore the // corresponding Accept/Reject. - Some(PeerState::Incoming { .. }) => { + Some(PeerState::Incoming { }) => { if let Some(state) = self.incoming.iter_mut().find(|i| i.peer_id == *peer_id) { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({:?}): Was in incoming \ - mode (id {:?}, through {:?})", peer_id, state.incoming_id, endpoint); + debug!(target: "sub-libp2p", + "Libp2p => Disconnected({}): Was in incoming mode with id {:?}.", + peer_id, state.incoming_id); state.alive = false; } else { error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ @@ -867,7 +978,7 @@ impl NetworkBehaviour for GenericProto { } Some(PeerState::Poisoned) => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), + error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id), } } @@ -878,13 +989,13 @@ impl NetworkBehaviour for GenericProto { fn inject_dial_failure(&mut self, peer_id: &PeerId) { if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { match mem::replace(entry.get_mut(), PeerState::Poisoned) { - // The node is not in our list. + // The peer is not in our list. st @ PeerState::Banned { .. } => { trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); *entry.into_mut() = st; }, - // "Basic" situation: we failed to reach a node that the peerset requested. + // "Basic" situation: we failed to reach a peer that the peerset requested. PeerState::Requested | PeerState::PendingRequest { .. 
} => { debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); *entry.into_mut() = PeerState::Banned { @@ -894,7 +1005,7 @@ impl NetworkBehaviour for GenericProto { self.peerset.dropped(peer_id.clone()) }, - // We can still get dial failures even if we are already connected to the node, + // We can still get dial failures even if we are already connected to the peer, // as an extra diagnostic for an earlier attempt. st @ PeerState::Disabled { .. } | st @ PeerState::Enabled { .. } | st @ PeerState::DisabledPendingEnable { .. } | st @ PeerState::Incoming { .. } => { @@ -907,99 +1018,137 @@ impl NetworkBehaviour for GenericProto { } } else { - // The node is not in our list. + // The peer is not in our list. trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); } } - fn inject_node_event( + fn inject_event( &mut self, source: PeerId, + connection: ConnectionId, event: NotifsHandlerOut, ) { match event { - NotifsHandlerOut::Closed { reason } => { - debug!(target: "sub-libp2p", "Handler({:?}) => Closed: {}", source, reason); + NotifsHandlerOut::Closed { endpoint, reason } => { + debug!(target: "sub-libp2p", + "Handler({:?}) => Endpoint {:?} closed for custom protocols: {}", + source, endpoint, reason); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { entry } else { - error!(target: "sub-libp2p", "State mismatch in the custom protos handler"); + error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); return }; - debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); - let event = GenericProtoOut::CustomProtocolClosed { - reason, - peer_id: source.clone(), - }; - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Enabled { open, connected_point } => { - debug_assert!(open); - - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); - self.peerset.dropped(source.clone()); + let last = match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Enabled { mut open } => { + debug_assert!(open.iter().any(|c| c == &connection)); + open.retain(|c| c != &connection); debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); - self.events.push(NetworkBehaviourAction::SendEvent { + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: source.clone(), + handler: NotifyHandler::One(connection), event: NotifsHandlerIn::Disable, }); + let last = open.is_empty(); + + if last { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + self.peerset.dropped(source.clone()); + *entry.into_mut() = PeerState::Disabled { + open, + banned_until: None + }; + } else { + *entry.into_mut() = PeerState::Enabled { open }; + } + + last + }, + PeerState::Disabled { mut open, banned_until } => { + debug_assert!(open.iter().any(|c| c == &connection)); + open.retain(|c| c != &connection); + let last = open.is_empty(); *entry.into_mut() = PeerState::Disabled { - open: false, - connected_point, - banned_until: None + open, + banned_until }; + last }, - PeerState::Disabled { open, connected_point, banned_until } => { - debug_assert!(open); - *entry.into_mut() = PeerState::Disabled { open: false, connected_point, banned_until }; - }, - PeerState::DisabledPendingEnable { open, connected_point, timer, timer_deadline } => { - debug_assert!(open); + PeerState::DisabledPendingEnable { + mut open, + timer, + timer_deadline + } => { + debug_assert!(open.iter().any(|c| c == &connection)); + 
open.retain(|c| c != &connection); + let last = open.is_empty(); *entry.into_mut() = PeerState::DisabledPendingEnable { - open: false, - connected_point, + open, timer, timer_deadline }; + last }, - _ => error!(target: "sub-libp2p", "State mismatch in the custom protos handler"), + state => { + error!(target: "sub-libp2p", + "Unexpected state in the custom protos handler: {:?}", + state); + return + } + }; + + if last { + debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); + let event = GenericProtoOut::CustomProtocolClosed { + reason, + peer_id: source.clone(), + }; + self.events.push(NetworkBehaviourAction::GenerateEvent(event)); + } else { + debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); } } - NotifsHandlerOut::Open => { - debug!(target: "sub-libp2p", "Handler({:?}) => Open", source); - let endpoint = match self.peers.get_mut(&source) { - Some(PeerState::Enabled { ref mut open, ref connected_point }) | - Some(PeerState::DisabledPendingEnable { ref mut open, ref connected_point, .. }) | - Some(PeerState::Disabled { ref mut open, ref connected_point, .. }) if !*open => { - *open = true; - connected_point.clone() + NotifsHandlerOut::Open { endpoint } => { + debug!(target: "sub-libp2p", + "Handler({:?}) => Endpoint {:?} open for custom protocols.", + source, endpoint); + + let first = match self.peers.get_mut(&source) { + Some(PeerState::Enabled { ref mut open, .. }) | + Some(PeerState::DisabledPendingEnable { ref mut open, .. }) | + Some(PeerState::Disabled { ref mut open, .. }) => { + let first = open.is_empty(); + open.push(connection); + first } - _ => { - error!(target: "sub-libp2p", "State mismatch in the custom protos handler"); + state => { + error!(target: "sub-libp2p", + "Open: Unexpected state in the custom protos handler: {:?}", + state); return } }; - debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = GenericProtoOut::CustomProtocolOpen { - peer_id: source, - endpoint, - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); + if first { + debug!(target: "sub-libp2p", "External API <= Open({:?})", source); + let event = GenericProtoOut::CustomProtocolOpen { peer_id: source }; + self.events.push(NetworkBehaviourAction::GenerateEvent(event)); + } else { + debug!(target: "sub-libp2p", "Secondary connection opened custom protocol."); + } } NotifsHandlerOut::CustomMessage { message } => { debug_assert!(self.is_open(&source)); trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = GenericProtoOut::CustomMessage { + let event = GenericProtoOut::LegacyMessage { peer_id: source, message, }; @@ -1007,7 +1156,7 @@ impl NetworkBehaviour for GenericProto { self.events.push(NetworkBehaviourAction::GenerateEvent(event)); } - NotifsHandlerOut::Notification { protocol_name, engine_id, message } => { + NotifsHandlerOut::Notification { protocol_name, message } => { debug_assert!(self.is_open(&source)); trace!( target: "sub-libp2p", @@ -1015,18 +1164,11 @@ impl NetworkBehaviour for GenericProto { source, str::from_utf8(&protocol_name) ); - trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = GenericProtoOut::CustomMessage { + trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); + let event = GenericProtoOut::Notification { peer_id: source, - message: { - let message = GenericMessage::<(), (), (), ()>::Consensus(ConsensusMessage { - 
engine_id, - data: message.to_vec(), - }); - - // Note that we clone `message` here. - From::from(&message.encode()[..]) - }, + protocol_name, + message, }; self.events.push(NetworkBehaviourAction::GenerateEvent(event)); @@ -1051,11 +1193,12 @@ impl NetworkBehaviour for GenericProto { } NotifsHandlerOut::ProtocolError { error, .. } => { - debug!(target: "sub-libp2p", "Handler({:?}) => Severe protocol error: {:?}", + debug!(target: "sub-libp2p", + "Handler({:?}) => Severe protocol error: {:?}", source, error); - // A severe protocol error happens when we detect a "bad" node, such as a node on - // a different chain, or a node that doesn't speak the same protocol(s). We - // decrease the node's reputation, hence lowering the chances we try this node + // A severe protocol error happens when we detect a "bad" peer, such as a peer on + // a different chain, or a peer that doesn't speak the same protocol(s). We + // decrease the peer's reputation, hence lowering the chances we try this peer // again in the short term. self.peerset.report_peer( source.clone(), @@ -1109,27 +1252,34 @@ impl NetworkBehaviour for GenericProto { } debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); - self.events.push(NetworkBehaviourAction::DialPeer { peer_id: peer_id.clone() }); + self.events.push(NetworkBehaviourAction::DialPeer { + peer_id: peer_id.clone(), + condition: DialPeerCondition::Disconnected + }); *peer_state = PeerState::Requested; } - PeerState::DisabledPendingEnable { mut timer, connected_point, open, timer_deadline } => { + PeerState::DisabledPendingEnable { + mut timer, + open, + timer_deadline + } => { if let Poll::Pending = Pin::new(&mut timer).poll(cx) { *peer_state = PeerState::DisabledPendingEnable { timer, - connected_point, open, timer_deadline }; continue; } - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable now that ban has expired", peer_id); - self.events.push(NetworkBehaviourAction::SendEvent { + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable (ban expired)", peer_id); + self.events.push(NetworkBehaviourAction::NotifyHandler { peer_id: peer_id.clone(), + handler: NotifyHandler::All, event: NotifsHandlerIn::Enable, }); - *peer_state = PeerState::Enabled { connected_point, open }; + *peer_state = PeerState::Enabled { open }; } st @ _ => *peer_state = st, diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index e97176cfbbfbb98cfa6f1f1d2f25dc9f5f777990..f0e2fc4bb8a8d70af5ef58298fa6d45be07228d8 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -15,6 +15,7 @@ // along with Substrate. If not, see . 
pub use self::group::{NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut}; +pub use self::legacy::ConnectionKillError as LegacyConnectionKillError; mod group; mod legacy; diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index d6d9919d3e14df0c2261731cddb0811aa291f920..46b759d4580a64a1c947c2ac22e07bce957f76d4 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -51,10 +51,8 @@ use crate::protocol::generic_proto::{ handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, upgrade::{NotificationsIn, NotificationsOut, NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec}, }; -use crate::protocol::message::generic::{Message as GenericMessage, ConsensusMessage}; use bytes::BytesMut; -use codec::Encode as _; use libp2p::core::{either::{EitherError, EitherOutput}, ConnectedPoint, PeerId}; use libp2p::core::upgrade::{EitherUpgrade, UpgradeError, SelectUpgrade, InboundUpgrade, OutboundUpgrade}; use libp2p::swarm::{ @@ -65,9 +63,9 @@ use libp2p::swarm::{ SubstreamProtocol, NegotiatedSubstream, }; -use log::error; -use sp_runtime::ConsensusEngineId; -use std::{borrow::Cow, error, io, task::{Context, Poll}}; +use log::{debug, error}; +use prometheus_endpoint::HistogramVec; +use std::{borrow::Cow, error, io, str, task::{Context, Poll}}; /// Implements the `IntoProtocolsHandler` trait of libp2p. /// @@ -78,10 +76,10 @@ use std::{borrow::Cow, error, io, task::{Context, Poll}}; /// See the documentation at the module level for more information. pub struct NotifsHandlerProto { /// Prototypes for handlers for inbound substreams. - in_handlers: Vec<(NotifsInHandlerProto, ConsensusEngineId)>, + in_handlers: Vec, /// Prototypes for handlers for outbound substreams. - out_handlers: Vec<(NotifsOutHandlerProto, ConsensusEngineId)>, + out_handlers: Vec, /// Prototype for handler for backwards-compatibility. legacy: LegacyProtoHandlerProto, @@ -92,10 +90,10 @@ pub struct NotifsHandlerProto { /// See the documentation at the module level for more information. pub struct NotifsHandler { /// Handlers for inbound substreams. - in_handlers: Vec<(NotifsInHandler, ConsensusEngineId)>, + in_handlers: Vec, /// Handlers for outbound substreams. - out_handlers: Vec<(NotifsOutHandler, ConsensusEngineId)>, + out_handlers: Vec, /// Handler for backwards-compatibility. 
legacy: LegacyProtoHandler, @@ -121,7 +119,7 @@ impl IntoProtocolsHandler for NotifsHandlerProto { fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { let in_handlers = self.in_handlers.iter() - .map(|(h, _)| h.inbound_protocol()) + .map(|h| h.inbound_protocol()) .collect::>(); SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) @@ -131,11 +129,11 @@ impl IntoProtocolsHandler for NotifsHandlerProto { NotifsHandler { in_handlers: self.in_handlers .into_iter() - .map(|(p, e)| (p.into_handler(remote_peer_id, connected_point), e)) + .map(|p| p.into_handler(remote_peer_id, connected_point)) .collect(), out_handlers: self.out_handlers .into_iter() - .map(|(p, e)| (p.into_handler(remote_peer_id, connected_point), e)) + .map(|p| p.into_handler(remote_peer_id, connected_point)) .collect(), legacy: self.legacy.into_handler(remote_peer_id, connected_point), enabled: EnabledState::Initial, @@ -145,7 +143,7 @@ impl IntoProtocolsHandler for NotifsHandlerProto { } /// Event that can be received by a `NotifsHandler`. -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum NotifsHandlerIn { /// The node should start using custom protocols. Enable, @@ -155,7 +153,8 @@ pub enum NotifsHandlerIn { /// Sends a message through the custom protocol substream. /// - /// > **Note**: This must **not** be an encoded `ConsensusMessage` message. + /// > **Note**: This must **not** be a `ConsensusMessage`, `Transactions`, or + /// > `BlockAnnounce` message. SendLegacy { /// The message to send. message: Vec, @@ -166,17 +165,13 @@ pub enum NotifsHandlerIn { /// Name of the protocol for the message. /// /// Must match one of the registered protocols. For backwards-compatibility reasons, if - /// the remote doesn't support this protocol, we use the legacy substream to send a - /// `ConsensusMessage` message. + /// the remote doesn't support this protocol, we use the legacy substream. protocol_name: Cow<'static, [u8]>, - /// The engine ID to use, in case we need to send this message over the legacy substream. + /// Message to send on the legacy substream if the protocol isn't available. /// - /// > **Note**: Ideally this field wouldn't be necessary, and we would deduce the engine - /// > ID from the existing handlers. However, it is possible (especially in test - /// > situations) that we open connections before all the notification protocols - /// > have been registered, in which case we always rely on the legacy substream. - engine_id: ConsensusEngineId, + /// This corresponds to what you would have sent with `SendLegacy`. + encoded_fallback_message: Vec, /// The message to send. message: Vec, @@ -186,13 +181,18 @@ pub enum NotifsHandlerIn { /// Event that can be emitted by a `NotifsHandler`. #[derive(Debug)] pub enum NotifsHandlerOut { - /// Opened the substreams with the remote. - Open, + /// The connection is open for custom protocols. + Open { + /// The endpoint of the connection that is open for custom protocols. + endpoint: ConnectedPoint, + }, - /// Closed the substreams with the remote. + /// The connection is closed for custom protocols. Closed { - /// Reason why the substream closed, for diagnostic purposes. + /// The reason for closing, for diagnostic purposes. reason: Cow<'static, str>, + /// The endpoint of the connection that closed for custom protocols. + endpoint: ConnectedPoint, }, /// Received a non-gossiping message on the legacy substream. @@ -206,17 +206,10 @@ pub enum NotifsHandlerOut { /// Received a message on a custom protocol substream. 
Notification { - /// Engine corresponding to the message. + /// Name of the protocol of the message. protocol_name: Cow<'static, [u8]>, - /// For legacy reasons, the name to use if we had received the message from the legacy - /// substream. - engine_id: ConsensusEngineId, - /// Message that has been received. - /// - /// If `protocol_name` is `None`, this decodes to a `Message`. If `protocol_name` is `Some`, - /// this is directly a gossiping message. message: BytesMut, }, @@ -238,12 +231,30 @@ pub enum NotifsHandlerOut { impl NotifsHandlerProto { /// Builds a new handler. - pub fn new(legacy: RegisteredProtocol, list: impl Into, ConsensusEngineId, Vec)>>) -> Self { + /// + /// The `queue_size_report` is an optional Prometheus metric that can report the size of the + /// messages queue. If passed, it must have one label for the protocol name. + pub fn new(legacy: RegisteredProtocol, list: impl Into, Vec)>>, queue_size_report: Option) -> Self { let list = list.into(); + let out_handlers = list + .clone() + .into_iter() + .map(|(p, _)| { + let queue_size_report = queue_size_report.as_ref().and_then(|qs| { + if let Ok(utf8) = str::from_utf8(&p) { + Some(qs.with_label_values(&[utf8])) + } else { + log::warn!("Ignoring Prometheus metric because {:?} isn't UTF-8", p); + None + } + }); + NotifsOutHandlerProto::new(p, queue_size_report) + }).collect(); + NotifsHandlerProto { - in_handlers: list.clone().into_iter().map(|(p, e, _)| (NotifsInHandlerProto::new(p), e)).collect(), - out_handlers: list.clone().into_iter().map(|(p, e, _)| (NotifsOutHandlerProto::new(p), e)).collect(), + in_handlers: list.clone().into_iter().map(|(p, _)| NotifsInHandlerProto::new(p)).collect(), + out_handlers, legacy: LegacyProtoHandlerProto::new(legacy), } } @@ -266,7 +277,7 @@ impl ProtocolsHandler for NotifsHandler { fn listen_protocol(&self) -> SubstreamProtocol { let in_handlers = self.in_handlers.iter() - .map(|h| h.0.listen_protocol().into_upgrade().1) + .map(|h| h.listen_protocol().into_upgrade().1) .collect::>(); let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); @@ -279,7 +290,7 @@ impl ProtocolsHandler for NotifsHandler { ) { match out { EitherOutput::First((out, num)) => - self.in_handlers[num].0.inject_fully_negotiated_inbound(out), + self.in_handlers[num].inject_fully_negotiated_inbound(out), EitherOutput::Second(out) => self.legacy.inject_fully_negotiated_inbound(out), } @@ -292,7 +303,7 @@ impl ProtocolsHandler for NotifsHandler { ) { match (out, num) { (EitherOutput::First(out), Some(num)) => - self.out_handlers[num].0.inject_fully_negotiated_outbound(out, ()), + self.out_handlers[num].inject_fully_negotiated_outbound(out, ()), (EitherOutput::Second(out), None) => self.legacy.inject_fully_negotiated_outbound(out, ()), _ => error!("inject_fully_negotiated_outbound called with wrong parameters"), @@ -302,54 +313,53 @@ impl ProtocolsHandler for NotifsHandler { fn inject_event(&mut self, message: NotifsHandlerIn) { match message { NotifsHandlerIn::Enable => { + if let EnabledState::Enabled = self.enabled { + debug!("enabling already-enabled handler"); + } self.enabled = EnabledState::Enabled; self.legacy.inject_event(LegacyProtoHandlerIn::Enable); - for (handler, _) in &mut self.out_handlers { + for handler in &mut self.out_handlers { handler.inject_event(NotifsOutHandlerIn::Enable { initial_message: vec![] }); } for num in self.pending_in.drain(..) 
{ - self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Accept(vec![])); + self.in_handlers[num].inject_event(NotifsInHandlerIn::Accept(vec![])); } }, NotifsHandlerIn::Disable => { + if let EnabledState::Disabled = self.enabled { + debug!("disabling already-disabled handler"); + } self.legacy.inject_event(LegacyProtoHandlerIn::Disable); // The notifications protocols start in the disabled state. If we were in the // "Initial" state, then we shouldn't disable the notifications protocols again. if self.enabled != EnabledState::Initial { - for (handler, _) in &mut self.out_handlers { + for handler in &mut self.out_handlers { handler.inject_event(NotifsOutHandlerIn::Disable); } } self.enabled = EnabledState::Disabled; for num in self.pending_in.drain(..) { - self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); + self.in_handlers[num].inject_event(NotifsInHandlerIn::Refuse); } }, NotifsHandlerIn::SendLegacy { message } => self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }), - NotifsHandlerIn::SendNotification { message, engine_id, protocol_name } => { - for (handler, ngn_id) in &mut self.out_handlers { + NotifsHandlerIn::SendNotification { message, encoded_fallback_message, protocol_name } => { + for handler in &mut self.out_handlers { if handler.protocol_name() != &protocol_name[..] { - break; + continue; } if handler.is_open() { handler.inject_event(NotifsOutHandlerIn::Send(message)); return; - } else { - debug_assert_eq!(engine_id, *ngn_id); } } - let message = GenericMessage::<(), (), (), ()>::Consensus(ConsensusMessage { - engine_id, - data: message, - }); - self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { - message: message.encode() + message: encoded_fallback_message, }); }, } @@ -362,21 +372,21 @@ impl ProtocolsHandler for NotifsHandler { ) { match (err, num) { (ProtocolsHandlerUpgrErr::Timeout, Some(num)) => - self.out_handlers[num].0.inject_dial_upgrade_error( + self.out_handlers[num].inject_dial_upgrade_error( (), ProtocolsHandlerUpgrErr::Timeout ), (ProtocolsHandlerUpgrErr::Timeout, None) => self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout), (ProtocolsHandlerUpgrErr::Timer, Some(num)) => - self.out_handlers[num].0.inject_dial_upgrade_error( + self.out_handlers[num].inject_dial_upgrade_error( (), ProtocolsHandlerUpgrErr::Timer ), (ProtocolsHandlerUpgrErr::Timer, None) => self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer), (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), Some(num)) => - self.out_handlers[num].0.inject_dial_upgrade_error( + self.out_handlers[num].inject_dial_upgrade_error( (), ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) ), @@ -386,7 +396,7 @@ impl ProtocolsHandler for NotifsHandler { ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) ), (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))), Some(num)) => - self.out_handlers[num].0.inject_dial_upgrade_error( + self.out_handlers[num].inject_dial_upgrade_error( (), ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) ), @@ -407,7 +417,7 @@ impl ProtocolsHandler for NotifsHandler { return KeepAlive::Yes; } - for (handler, _) in &self.in_handlers { + for handler in &self.in_handlers { let val = handler.connection_keep_alive(); if val.is_yes() { return KeepAlive::Yes; @@ -415,7 +425,7 @@ impl ProtocolsHandler for NotifsHandler { if ret < val { ret = val; } } - for (handler, _) in &self.out_handlers { + for handler in &self.out_handlers { let val = 
handler.connection_keep_alive(); if val.is_yes() { return KeepAlive::Yes; @@ -432,7 +442,39 @@ impl ProtocolsHandler for NotifsHandler { ) -> Poll< ProtocolsHandlerEvent > { - for (handler_num, (handler, engine_id)) in self.in_handlers.iter_mut().enumerate() { + while let Poll::Ready(ev) = self.legacy.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol.map_upgrade(EitherUpgrade::B), + info: None, + }), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { endpoint, .. }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Open { endpoint } + )), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { endpoint, reason }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Closed { endpoint, reason } + )), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message } + )), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::Clogged { messages }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Clogged { messages } + )), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::ProtocolError { is_severe, error } + )), + ProtocolsHandlerEvent::Close(err) => + return Poll::Ready(ProtocolsHandlerEvent::Close(EitherError::B(err))), + } + } + + for (handler_num, handler) in self.in_handlers.iter_mut().enumerate() { while let Poll::Ready(ev) = handler.poll(cx) { match ev { ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => @@ -453,7 +495,6 @@ impl ProtocolsHandler for NotifsHandler { if self.legacy.is_open() { let msg = NotifsHandlerOut::Notification { message, - engine_id: *engine_id, protocol_name: handler.protocol_name().to_owned().into(), }; return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); @@ -463,7 +504,7 @@ impl ProtocolsHandler for NotifsHandler { } } - for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() { + for (handler_num, handler) in self.out_handlers.iter_mut().enumerate() { while let Poll::Ready(ev) = handler.poll(cx) { match ev { ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => @@ -486,38 +527,6 @@ impl ProtocolsHandler for NotifsHandler { } } - while let Poll::Ready(ev) = self.legacy.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol.map_upgrade(EitherUpgrade::B), - info: None, - }), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { .. 
}) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Open - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Closed { reason } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CustomMessage { message } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::Clogged { messages }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Clogged { messages } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::ProtocolError { is_severe, error } - )), - ProtocolsHandlerEvent::Close(err) => - return Poll::Ready(ProtocolsHandlerEvent::Close(EitherError::B(err))), - } - } - Poll::Pending } } diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs index a2d2fc9246d1c79b761d5e5eb1ce375fba33b49a..bc84fd847c9c4ba44dbffcf5a7735c18c39f7776 100644 --- a/client/network/src/protocol/generic_proto/handler/legacy.rs +++ b/client/network/src/protocol/generic_proto/handler/legacy.rs @@ -40,9 +40,8 @@ use std::{pin::Pin, task::{Context, Poll}}; /// it is turned into a `LegacyProtoHandler`. It then handles all communications that are specific /// to Substrate on that single connection. /// -/// Note that there can be multiple instance of this struct simultaneously for same peer. However -/// if that happens, only one main instance can communicate with the outer layers of the code. In -/// other words, the outer layers of the code only ever see one handler. +/// Note that there can be multiple instances of this struct simultaneously for the same peer, +/// if there are multiple established connections to the peer. /// /// ## State of the handler /// @@ -61,6 +60,7 @@ use std::{pin::Pin, task::{Context, Poll}}; /// these states. For example, if the handler reports a network misbehaviour, it will close the /// substreams but it is the role of the user to send a `Disabled` event if it wants the connection /// to close. Otherwise, the handler will try to reopen substreams. +/// /// The handler starts in the "Initializing" state and must be transitionned to Enabled or Disabled /// as soon as possible. /// @@ -111,7 +111,7 @@ impl IntoProtocolsHandler for LegacyProtoHandlerProto { fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { LegacyProtoHandler { protocol: self.protocol, - endpoint: connected_point.to_endpoint(), + endpoint: connected_point.clone(), remote_peer_id: remote_peer_id.clone(), state: ProtocolState::Init { substreams: SmallVec::new(), @@ -136,7 +136,7 @@ pub struct LegacyProtoHandler { /// Whether we are the connection dialer or listener. Used to determine who, between the local /// node and the remote node, has priority. - endpoint: Endpoint, + endpoint: ConnectedPoint, /// Queue of events to send to the outside. /// @@ -218,12 +218,16 @@ pub enum LegacyProtoHandlerOut { CustomProtocolOpen { /// Version of the protocol that has been opened. version: u8, + /// The connected endpoint. + endpoint: ConnectedPoint, }, /// Closed a custom protocol with the remote. CustomProtocolClosed { /// Reason why the substream closed, for diagnostic purposes.
reason: Cow<'static, str>, + /// The connected endpoint. + endpoint: ConnectedPoint, }, /// Receives a message on a custom protocol substream. @@ -272,7 +276,7 @@ impl LegacyProtoHandler { ProtocolState::Init { substreams: incoming, .. } => { if incoming.is_empty() { - if let Endpoint::Dialer = self.endpoint { + if let ConnectedPoint::Dialer { .. } = self.endpoint { self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol: SubstreamProtocol::new(self.protocol.clone()), info: (), @@ -281,10 +285,10 @@ impl LegacyProtoHandler { ProtocolState::Opening { deadline: Delay::new(Duration::from_secs(60)) } - } else { let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: incoming[0].protocol_version() + version: incoming[0].protocol_version(), + endpoint: self.endpoint.clone() }; self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); ProtocolState::Normal { @@ -404,6 +408,7 @@ impl LegacyProtoHandler { if substreams.is_empty() { let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: "All substreams have been closed by the remote".into(), + endpoint: self.endpoint.clone() }; self.state = ProtocolState::Disabled { shutdown: shutdown.into_iter().collect(), @@ -416,6 +421,7 @@ impl LegacyProtoHandler { if substreams.is_empty() { let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: format!("Error on the last substream: {:?}", err).into(), + endpoint: self.endpoint.clone() }; self.state = ProtocolState::Disabled { shutdown: shutdown.into_iter().collect(), @@ -479,7 +485,8 @@ impl LegacyProtoHandler { ProtocolState::Opening { .. } => { let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: substream.protocol_version() + version: substream.protocol_version(), + endpoint: self.endpoint.clone() }; self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); ProtocolState::Normal { diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs index 4e16fb1af419f4aebb2748b09c477119f1165309..83923154bd6256de3bb4e983796807093f7019ad 100644 --- a/client/network/src/protocol/generic_proto/handler/notif_in.rs +++ b/client/network/src/protocol/generic_proto/handler/notif_in.rs @@ -36,7 +36,7 @@ use libp2p::swarm::{ }; use log::{error, warn}; use smallvec::SmallVec; -use std::{borrow::Cow, fmt, pin::Pin, str, task::{Context, Poll}}; +use std::{borrow::Cow, fmt, pin::Pin, task::{Context, Poll}}; /// Implements the `IntoProtocolsHandler` trait of libp2p. /// @@ -72,7 +72,7 @@ pub struct NotifsInHandler { } /// Event that can be received by a `NotifsInHandler`. -#[derive(Debug)] +#[derive(Debug, Clone)] pub enum NotifsInHandlerIn { /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send /// to the remote. @@ -156,16 +156,19 @@ impl ProtocolsHandler for NotifsInHandler { &mut self, (msg, proto): >::Output ) { + // If a substream already exists, we drop it and replace it with the new incoming one. if self.substream.is_some() { - warn!( - target: "sub-libp2p", - "Received duplicate inbound notifications substream for {:?}", - str::from_utf8(self.in_protocol.protocol_name()), - ); - return; + self.events_queue.push(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); } + // Note that we drop the existing substream, which will send an equivalent to a TCP "RST" + // to the remote and force-close the substream. It might seem like an unclean way to get + // rid of a substream. 
However, keep in mind that it is invalid for the remote to open + // multiple such substreams, and therefore sending a "RST" is the correct thing to do. + // Also note that we have already closed our writing side during the initial handshake, + // and we can't close "more" than that anyway. self.substream = Some(proto); + self.events_queue.push(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg))); self.pending_accept_refuses = self.pending_accept_refuses .checked_add(1) @@ -235,8 +238,15 @@ impl ProtocolsHandler for NotifsInHandler { match self.substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Some(Ok(msg)))) => - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg))), + Some(Poll::Ready(Some(Ok(msg)))) => { + if self.pending_accept_refuses != 0 { + warn!( + target: "sub-libp2p", + "Bad state in inbound-only handler: notif before accepting substream" + ); + } + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg))) + }, Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => { self.substream = None; return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs index 8c64491d997171df73606c2852765c10c9f3f21b..b5d6cd61ada2a9e54bfa5873e0f7992ab1dfcfcb 100644 --- a/client/network/src/protocol/generic_proto/handler/notif_out.rs +++ b/client/network/src/protocol/generic_proto/handler/notif_out.rs @@ -33,7 +33,8 @@ use libp2p::swarm::{ SubstreamProtocol, NegotiatedSubstream, }; -use log::error; +use log::{debug, warn, error}; +use prometheus_endpoint::Histogram; use smallvec::SmallVec; use std::{borrow::Cow, fmt, mem, pin::Pin, task::{Context, Poll}, time::Duration}; use wasm_timer::Instant; @@ -56,14 +57,17 @@ const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); pub struct NotifsOutHandlerProto { /// Name of the protocol to negotiate. protocol_name: Cow<'static, [u8]>, + /// Optional Prometheus histogram to report message queue size variations. + queue_size_report: Option, } impl NotifsOutHandlerProto { /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the /// notifications substream. - pub fn new(protocol_name: impl Into>) -> Self { + pub fn new(protocol_name: impl Into>, queue_size_report: Option) -> Self { NotifsOutHandlerProto { protocol_name: protocol_name.into(), + queue_size_report, } } } @@ -75,12 +79,14 @@ impl IntoProtocolsHandler for NotifsOutHandlerProto { DeniedUpgrade } - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { + fn into_handler(self, peer_id: &PeerId, _: &ConnectedPoint) -> Self::Handler { NotifsOutHandler { protocol_name: self.protocol_name, when_connection_open: Instant::now(), + queue_size_report: self.queue_size_report, state: State::Disabled, events_queue: SmallVec::new(), + peer_id: peer_id.clone(), } } } @@ -103,11 +109,17 @@ pub struct NotifsOutHandler { /// When the connection with the remote has been successfully established. when_connection_open: Instant, + /// Optional prometheus histogram to report message queue sizes variations. + queue_size_report: Option, + /// Queue of events to send to the outside. /// /// This queue must only ever be modified to insert elements at the back, or remove the first /// element. events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, + + /// Who we are connected to. 
+ peer_id: PeerId, } /// Our relationship with the node we're connected to. @@ -244,8 +256,8 @@ impl ProtocolsHandler for NotifsOutHandler { // Any other situation should never happen. State::Disabled | State::Refused | State::Open { .. } | State::DisabledOpen(_) => - error!("State mismatch in notifications handler: substream already open"), - State::Poisoned => error!("Notifications handler in a poisoned state"), + error!("☎️ State mismatch in notifications handler: substream already open"), + State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), } } @@ -268,9 +280,9 @@ impl ProtocolsHandler for NotifsOutHandler { // be recovered. When in doubt, let's drop the existing substream and // open a new one. if sub.close().now_or_never().is_none() { - log::warn!( + warn!( target: "sub-libp2p", - "Improperly closed outbound notifications substream" + "📞 Improperly closed outbound notifications substream" ); } @@ -281,37 +293,47 @@ impl ProtocolsHandler for NotifsOutHandler { }); self.state = State::Opening { initial_message }; }, - State::Opening { .. } | State::Refused | State::Open { .. } => - error!("Tried to enable notifications handler that was already enabled"), + st @ State::Opening { .. } | st @ State::Refused | st @ State::Open { .. } => { + debug!(target: "sub-libp2p", + "Tried to enable notifications handler that was already enabled"); + self.state = st; + } State::Poisoned => error!("Notifications handler in a poisoned state"), } } NotifsOutHandlerIn::Disable => { match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => - error!("Tried to disable notifications handler that was already disabled"), + st @ State::Disabled | st @ State::DisabledOpen(_) | st @ State::DisabledOpening => { + debug!(target: "sub-libp2p", + "Tried to disable notifications handler that was already disabled"); + self.state = st; + } State::Opening { .. } => self.state = State::DisabledOpening, State::Refused => self.state = State::Disabled, State::Open { substream, .. } => self.state = State::DisabledOpen(substream), - State::Poisoned => error!("Notifications handler in a poisoned state"), + State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), } } NotifsOutHandlerIn::Send(msg) => if let State::Open { substream, .. } = &mut self.state { - if let Some(Ok(_)) = substream.send(msg).now_or_never() { - } else { - log::warn!( + if substream.push_message(msg).is_err() { + warn!( target: "sub-libp2p", - "Failed to push message to queue, dropped it" + "📞 Notifications queue with peer {} is full, dropped message (protocol: {:?})", + self.peer_id, + self.protocol_name, ); } + if let Some(metric) = &self.queue_size_report { + metric.observe(substream.queue_len() as f64); + } } else { // This is an API misuse. - log::warn!( + warn!( target: "sub-libp2p", - "Tried to send a notification on a disabled handler" + "📞 Tried to send a notification on a disabled handler" ); }, } @@ -321,14 +343,14 @@ impl ProtocolsHandler for NotifsOutHandler { match mem::replace(&mut self.state, State::Poisoned) { State::Disabled => {}, State::DisabledOpen(_) | State::Refused | State::Open { .. } => - error!("State mismatch in NotificationsOut"), + error!("☎️ State mismatch in NotificationsOut"), State::Opening { .. 
} => { self.state = State::Refused; let ev = NotifsOutHandlerOut::Refused; self.events_queue.push(ProtocolsHandlerEvent::Custom(ev)); }, State::DisabledOpening => self.state = State::Disabled, - State::Poisoned => error!("Notifications handler in a poisoned state"), + State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), } } diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs index b8436e2c7f704bf2c4d5664c6908fce96c80d65c..1bc6e745f887685af41a6c9cfdfc05da84af7918 100644 --- a/client/network/src/protocol/generic_proto/tests.rs +++ b/client/network/src/protocol/generic_proto/tests.rs @@ -18,7 +18,7 @@ use futures::{prelude::*, ready}; use codec::{Encode, Decode}; -use libp2p::core::nodes::listeners::ListenerId; +use libp2p::core::connection::{ConnectionId, ListenerId}; use libp2p::core::ConnectedPoint; use libp2p::swarm::{Swarm, ProtocolsHandler, IntoProtocolsHandler}; use libp2p::swarm::{PollParameters, NetworkBehaviour, NetworkBehaviourAction}; @@ -26,7 +26,7 @@ use libp2p::{PeerId, Multiaddr, Transport}; use rand::seq::SliceRandom; use std::{error, io, task::Context, task::Poll, time::Duration}; use std::collections::HashSet; -use crate::message::{generic::BlockResponse, Message}; +use crate::protocol::message::{generic::BlockResponse, Message}; use crate::protocol::generic_proto::{GenericProto, GenericProtoOut}; use sp_test_primitives::Block; @@ -78,11 +78,11 @@ fn build_nodes() -> (Swarm, Swarm) { vec![] }, reserved_only: false, - reserved_nodes: Vec::new(), + priority_groups: Vec::new(), }); let behaviour = CustomProtoWithAddr { - inner: GenericProto::new(&b"test"[..], &[1], peerset), + inner: GenericProto::new(&b"test"[..], &[1], peerset, None), addrs: addrs .iter() .enumerate() @@ -148,20 +148,29 @@ impl NetworkBehaviour for CustomProtoWithAddr { list } - fn inject_connected(&mut self, peer_id: PeerId, endpoint: ConnectedPoint) { - self.inner.inject_connected(peer_id, endpoint) + fn inject_connected(&mut self, peer_id: &PeerId) { + self.inner.inject_connected(peer_id) } - fn inject_disconnected(&mut self, peer_id: &PeerId, endpoint: ConnectedPoint) { - self.inner.inject_disconnected(peer_id, endpoint) + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.inner.inject_disconnected(peer_id) } - fn inject_node_event( + fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.inner.inject_connection_established(peer_id, conn, endpoint) + } + + fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { + self.inner.inject_connection_closed(peer_id, conn, endpoint) + } + + fn inject_event( &mut self, peer_id: PeerId, + connection: ConnectionId, event: <::Handler as ProtocolsHandler>::OutEvent ) { - self.inner.inject_node_event(peer_id, event) + self.inner.inject_event(peer_id, connection, event) } fn poll( @@ -177,10 +186,6 @@ impl NetworkBehaviour for CustomProtoWithAddr { self.inner.poll(cx, params) } - fn inject_replaced(&mut self, peer_id: PeerId, closed_endpoint: ConnectedPoint, new_endpoint: ConnectedPoint) { - self.inner.inject_replaced(peer_id, closed_endpoint, new_endpoint) - } - fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn std::error::Error) { self.inner.inject_addr_reach_failure(peer_id, addr, error) } @@ -205,8 +210,8 @@ impl NetworkBehaviour for CustomProtoWithAddr { self.inner.inject_listener_error(id, err); } - fn 
inject_listener_closed(&mut self, id: ListenerId) { - self.inner.inject_listener_closed(id); + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + self.inner.inject_listener_closed(id, reason); } } @@ -245,7 +250,7 @@ fn two_nodes_transfer_lots_of_packets() { loop { match ready!(service2.poll_next_unpin(cx)) { Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, - Some(GenericProtoOut::CustomMessage { message, .. }) => { + Some(GenericProtoOut::LegacyMessage { message, .. }) => { match Message::::decode(&mut &message[..]).unwrap() { Message::::BlockResponse(BlockResponse { id: _, blocks }) => { assert!(blocks.is_empty()); @@ -315,7 +320,7 @@ fn basic_two_nodes_requests_in_parallel() { loop { match ready!(service2.poll_next_unpin(cx)) { Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, - Some(GenericProtoOut::CustomMessage { message, .. }) => { + Some(GenericProtoOut::LegacyMessage { message, .. }) => { let pos = to_receive.iter().position(|m| m.encode() == message).unwrap(); to_receive.remove(pos); if to_receive.is_empty() { @@ -345,13 +350,21 @@ fn reconnect_after_disconnect() { let mut service1_state = ServiceState::NotConnected; let mut service2_state = ServiceState::NotConnected; - // Run the events loops. - futures::executor::block_on(future::poll_fn(|cx| -> Poll> { + futures::executor::block_on(async move { loop { - let mut service1_not_ready = false; + // Grab next event from services. + let event = { + let s1 = service1.next(); + let s2 = service2.next(); + futures::pin_mut!(s1, s2); + match future::select(s1, s2).await { + future::Either::Left((ev, _)) => future::Either::Left(ev), + future::Either::Right((ev, _)) => future::Either::Right(ev), + } + }; - match service1.poll_next_unpin(cx) { - Poll::Ready(Some(GenericProtoOut::CustomProtocolOpen { .. })) => { + match event { + future::Either::Left(GenericProtoOut::CustomProtocolOpen { .. }) => { match service1_state { ServiceState::NotConnected => { service1_state = ServiceState::FirstConnec; @@ -363,19 +376,14 @@ fn reconnect_after_disconnect() { ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), } }, - Poll::Ready(Some(GenericProtoOut::CustomProtocolClosed { .. })) => { + future::Either::Left(GenericProtoOut::CustomProtocolClosed { .. }) => { match service1_state { ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, ServiceState::ConnectedAgain| ServiceState::NotConnected | ServiceState::Disconnected => panic!(), } }, - Poll::Pending => service1_not_ready = true, - _ => panic!() - } - - match service2.poll_next_unpin(cx) { - Poll::Ready(Some(GenericProtoOut::CustomProtocolOpen { .. })) => { + future::Either::Right(GenericProtoOut::CustomProtocolOpen { .. }) => { match service2_state { ServiceState::NotConnected => { service2_state = ServiceState::FirstConnec; @@ -387,43 +395,43 @@ fn reconnect_after_disconnect() { ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), } }, - Poll::Ready(Some(GenericProtoOut::CustomProtocolClosed { .. })) => { + future::Either::Right(GenericProtoOut::CustomProtocolClosed { .. 
}) => { match service2_state { ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, ServiceState::ConnectedAgain| ServiceState::NotConnected | ServiceState::Disconnected => panic!(), } }, - Poll::Pending if service1_not_ready => break, - Poll::Pending => {} - _ => panic!() + _ => {} } - } - if service1_state == ServiceState::ConnectedAgain && service2_state == ServiceState::ConnectedAgain { - Poll::Ready(Ok(())) - } else { - Poll::Pending - } - })).unwrap(); - - // Do a second 3-seconds run to make sure we don't get disconnected immediately again. - let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); - futures::executor::block_on(future::poll_fn(|cx| -> Poll> { - match service1.poll_next_unpin(cx) { - Poll::Pending => {}, - _ => panic!() + if service1_state == ServiceState::ConnectedAgain && service2_state == ServiceState::ConnectedAgain { + break; + } } - match service2.poll_next_unpin(cx) { - Poll::Pending => {}, - _ => panic!() - } + // Now that the two services have disconnected and reconnected, wait for 3 seconds and + // check whether they're still connected. + let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); + + loop { + // Grab next event from services. + let event = { + let s1 = service1.next(); + let s2 = service2.next(); + futures::pin_mut!(s1, s2); + match future::select(future::select(s1, s2), &mut delay).await { + future::Either::Right(_) => break, // success + future::Either::Left((future::Either::Left((ev, _)), _)) => ev, + future::Either::Left((future::Either::Right((ev, _)), _)) => ev, + } + }; - if let Poll::Ready(()) = delay.poll_unpin(cx) { - Poll::Ready(Ok(())) - } else { - Poll::Pending + match event { + GenericProtoOut::CustomProtocolOpen { .. } | + GenericProtoOut::CustomProtocolClosed { .. } => panic!(), + _ => {} + } } - })).unwrap(); + }); } diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/generic_proto/upgrade/notifications.rs index ddc07b5d6f3d6b2ffefe8fec47cee78768a1495a..f626110a3346cd414bab6a936b6f9fcd6290bd76 100644 --- a/client/network/src/protocol/generic_proto/upgrade/notifications.rs +++ b/client/network/src/protocol/generic_proto/upgrade/notifications.rs @@ -38,13 +38,12 @@ use futures::{prelude::*, ready}; use futures_codec::Framed; use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; use log::error; -use std::{borrow::Cow, collections::VecDeque, io, iter, mem, pin::Pin, task::{Context, Poll}}; +use std::{borrow::Cow, collections::VecDeque, convert::TryFrom as _, io, iter, mem, pin::Pin, task::{Context, Poll}}; use unsigned_varint::codec::UviBytes; /// Maximum allowed size of the two handshake messages, in bytes. const MAX_HANDSHAKE_SIZE: usize = 1024; -/// Maximum number of buffered messages before we consider the remote unresponsive and kill the -/// substream. +/// Maximum number of buffered messages before we refuse to accept more. const MAX_PENDING_MESSAGES: usize = 256; /// Upgrade that accepts a substream, sends back a status message, then becomes a unidirectional @@ -164,12 +163,9 @@ where TSubstream: AsyncRead + AsyncWrite, { /// Sends the handshake in order to inform the remote that we accept the substream. 
pub fn send_handshake(&mut self, message: impl Into>) { - match self.handshake { - NotificationsInSubstreamHandshake::NotSent => {} - _ => { - error!(target: "sub-libp2p", "Tried to send handshake twice"); - return; - } + if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) { + error!(target: "sub-libp2p", "Tried to send handshake twice"); + return; } self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); @@ -189,8 +185,10 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { NotificationsInSubstreamHandshake::Sent => return Stream::poll_next(this.socket.as_mut(), cx), - NotificationsInSubstreamHandshake::NotSent => - return Poll::Pending, + NotificationsInSubstreamHandshake::NotSent => { + *this.handshake = NotificationsInSubstreamHandshake::NotSent; + return Poll::Pending + }, NotificationsInSubstreamHandshake::PendingSend(msg) => match Sink::poll_ready(this.socket.as_mut(), cx) { Poll::Ready(_) => { @@ -200,15 +198,19 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin, Err(err) => return Poll::Ready(Some(Err(err))), } }, - Poll::Pending => - *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg), + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); + return Poll::Pending + } }, NotificationsInSubstreamHandshake::Close => match Sink::poll_close(this.socket.as_mut(), cx)? { Poll::Ready(()) => *this.handshake = NotificationsInSubstreamHandshake::Sent, - Poll::Pending => - *this.handshake = NotificationsInSubstreamHandshake::Close, + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::Close; + return Poll::Pending + } }, } } @@ -277,6 +279,25 @@ where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, } } +impl NotificationsOutSubstream { + /// Returns the number of items in the queue, capped to `u32::max_value()`. + pub fn queue_len(&self) -> u32 { + u32::try_from(self.messages_queue.len()).unwrap_or(u32::max_value()) + } + + /// Push a message to the queue of messages. + /// + /// This has the same effect as the `Sink::start_send` implementation. 
+ pub fn push_message(&mut self, item: Vec) -> Result<(), NotificationsOutError> { + if self.messages_queue.len() >= MAX_PENDING_MESSAGES { + return Err(NotificationsOutError::Clogged); + } + + self.messages_queue.push_back(item); + Ok(()) + } +} + impl Sink> for NotificationsOutSubstream where TSubstream: AsyncRead + AsyncWrite + Unpin, { @@ -287,12 +308,7 @@ impl Sink> for NotificationsOutSubstream } fn start_send(mut self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { - if self.messages_queue.len() >= MAX_PENDING_MESSAGES { - return Err(NotificationsOutError::Clogged); - } - - self.messages_queue.push_back(item); - Ok(()) + self.push_message(item) } fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index d48bd4b91b1b4721c52b8b18c2cb75e740e3908e..85312b0803234e43dff213c4b1ade7f4f06a3c6c 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -29,7 +29,7 @@ use codec::{self, Encode, Decode}; use crate::{ chain::Client, config::ProtocolId, - protocol::{api, light_dispatch::TIMEOUT_REPUTATION_CHANGE} + protocol::{api, message::BlockAttributes} }; use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; use libp2p::{ @@ -37,6 +37,7 @@ use libp2p::{ ConnectedPoint, Multiaddr, PeerId, + connection::ConnectionId, upgrade::{InboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, upgrade::{OutboundUpgrade, read_one, write_one} }, @@ -44,18 +45,23 @@ use libp2p::{ NegotiatedSubstream, NetworkBehaviour, NetworkBehaviourAction, + NotifyHandler, OneShotHandler, + OneShotHandlerConfig, PollParameters, - SubstreamProtocol + SubstreamProtocol, } }; use nohash_hasher::IntMap; use prost::Message; -use rustc_hex::ToHex; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; -use sp_core::storage::{ChildInfo, StorageKey}; +use sp_core::{ + storage::{ChildInfo, StorageKey}, + hexdisplay::HexDisplay, +}; +use smallvec::SmallVec; use sp_blockchain::{Error as ClientError}; use sp_runtime::{ traits::{Block, Header, NumberFor, Zero}, @@ -72,38 +78,52 @@ use std::{ use void::Void; use wasm_timer::Instant; +/// Reputation change for a peer when a request timed out. +pub(crate) const TIMEOUT_REPUTATION_CHANGE: i32 = -(1 << 8); + /// Configuration options for `LightClientHandler` behaviour. #[derive(Debug, Clone)] pub struct Config { - max_data_size: usize, + max_request_size: usize, + max_response_size: usize, max_pending_requests: usize, inactivity_timeout: Duration, request_timeout: Duration, - protocol: Bytes, + light_protocol: Bytes, + block_protocol: Bytes, } impl Config { /// Create a fresh configuration with the following options: /// - /// - max. data size = 1 MiB + /// - max. request size = 1 MiB + /// - max. response size = 16 MiB /// - max. pending requests = 128 /// - inactivity timeout = 15s /// - request timeout = 15s pub fn new(id: &ProtocolId) -> Self { let mut c = Config { - max_data_size: 1024 * 1024, + max_request_size: 1 * 1024 * 1024, + max_response_size: 16 * 1024 * 1024, max_pending_requests: 128, inactivity_timeout: Duration::from_secs(15), request_timeout: Duration::from_secs(15), - protocol: Bytes::new(), + light_protocol: Bytes::new(), + block_protocol: Bytes::new(), }; c.set_protocol(id); c } - /// Limit the max. length of incoming request bytes. 
- pub fn set_max_data_size(&mut self, v: usize) -> &mut Self { - self.max_data_size = v; + /// Limit the max. length in bytes of a request. + pub fn set_max_request_size(&mut self, v: usize) -> &mut Self { + self.max_request_size = v; + self + } + + /// Limit the max. length in bytes of a response. + pub fn set_max_response_size(&mut self, v: usize) -> &mut Self { + self.max_response_size = v; self } @@ -127,11 +147,18 @@ impl Config { /// Set protocol to use for upgrade negotiation. pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut v = Vec::new(); - v.extend_from_slice(b"/"); - v.extend_from_slice(id.as_bytes()); - v.extend_from_slice(b"/light/2"); - self.protocol = v.into(); + let mut vl = Vec::new(); + vl.extend_from_slice(b"/"); + vl.extend_from_slice(id.as_bytes()); + vl.extend_from_slice(b"/light/2"); + self.light_protocol = vl.into(); + + let mut vb = Vec::new(); + vb.extend_from_slice(b"/"); + vb.extend_from_slice(id.as_bytes()); + vb.extend_from_slice(b"/sync/2"); + self.block_protocol = vb.into(); + self } } @@ -165,6 +192,10 @@ pub enum Error { // used because we currently only support a subset of those. #[derive(Debug)] pub enum Request { + Body { + request: fetcher::RemoteBodyRequest, + sender: oneshot::Sender, ClientError>> + }, Header { request: fetcher::RemoteHeaderRequest, sender: oneshot::Sender> @@ -197,7 +228,8 @@ enum Reply { VecU8(Vec), VecNumberU32(Vec<(::Number, u32)>), MapVecU8OptVecU8(HashMap, Option>>), - Header(B::Header) + Header(B::Header), + Extrinsics(Vec), } /// Augments a light client request with metadata. @@ -209,25 +241,39 @@ struct RequestWrapper { retries: usize, /// The actual request. request: Request, - /// Peer information, e.g. `PeerId`. - peer: P + /// The peer to send the request to, e.g. `PeerId`. + peer: P, + /// The connection to use for sending the request. + connection: Option, } /// Information we have about some peer. #[derive(Debug)] struct PeerInfo { - address: Multiaddr, + connections: SmallVec<[(ConnectionId, Multiaddr); crate::MAX_CONNECTIONS_PER_PEER]>, best_block: Option>, status: PeerStatus, } +impl Default for PeerInfo { + fn default() -> Self { + PeerInfo { + connections: SmallVec::new(), + best_block: None, + status: PeerStatus::Idle, + } + } +} + +type RequestId = u64; + /// A peer is either idle or busy processing a request from us. #[derive(Debug, Clone, PartialEq, Eq)] enum PeerStatus { /// The peer is available. Idle, /// We wait for the peer to return us a response for the given request ID. - BusyWith(u64), + BusyWith(RequestId), } /// The light client handler behaviour. @@ -245,9 +291,9 @@ pub struct LightClientHandler { /// Pending (local) requests. pending_requests: VecDeque>, /// Requests on their way to remote peers. - outstanding: IntMap>, + outstanding: IntMap>, /// (Local) Request ID counter - next_request_id: u64, + next_request_id: RequestId, /// Handle to use for reporting misbehaviour of peers. peerset: sc_peerset::PeersetHandle, } @@ -280,6 +326,7 @@ where /// means to determine it ourselves. 
pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor) { if let Some(info) = self.peers.get_mut(peer) { + log::trace!("new best block for {:?}: {:?}", peer, num); info.best_block = Some(num) } } @@ -294,35 +341,18 @@ where retries: retries(&req), request: req, peer: (), // we do not know the peer yet + connection: None, }; self.pending_requests.push_back(rw); Ok(()) } - fn next_request_id(&mut self) -> u64 { + fn next_request_id(&mut self) -> RequestId { let id = self.next_request_id; self.next_request_id += 1; id } - // Iterate over peers known to possess a certain block. - fn idle_peers_with_block(&mut self, num: NumberFor) -> impl Iterator + '_ { - self.peers.iter() - .filter(move |(_, info)| { - info.status == PeerStatus::Idle && info.best_block >= Some(num) - }) - .map(|(peer, _)| peer.clone()) - } - - // Iterate over peers without a known block. - fn idle_peers_with_unknown_block(&mut self) -> impl Iterator + '_ { - self.peers.iter() - .filter(|(_, info)| { - info.status == PeerStatus::Idle && info.best_block.is_none() - }) - .map(|(peer, _)| peer.clone()) - } - /// Remove the given peer. /// /// If we have a request to this peer in flight, we move it back to @@ -335,12 +365,50 @@ where retries: rw.retries, request: rw.request, peer: (), // need to find another peer + connection: None, }; self.pending_requests.push_back(rw); } self.peers.remove(peer); } + /// Prepares a request by selecting a suitable peer and connection to send it to. + /// + /// If there is currently no suitable peer for the request, the given request + /// is returned as `Err`. + fn prepare_request(&self, req: RequestWrapper) + -> Result<(PeerId, RequestWrapper), RequestWrapper> + { + let number = required_block(&req.request); + + let mut peer = None; + for (peer_id, peer_info) in self.peers.iter() { + if peer_info.status == PeerStatus::Idle { + match peer_info.best_block { + Some(n) => if n >= number { + peer = Some((peer_id, peer_info)); + break + }, + None => peer = Some((peer_id, peer_info)) + } + } + } + + if let Some((peer_id, peer_info)) = peer { + let connection = peer_info.connections.iter().next().map(|(id, _)| *id); + let rw = RequestWrapper { + timestamp: req.timestamp, + retries: req.retries, + request: req.request, + peer: peer_id.clone(), + connection, + }; + Ok((peer_id.clone(), rw)) + } else { + Err(req) + } + } + /// Process a local request's response from remote. /// /// If successful, this will give us the actual, checked data we should be @@ -349,10 +417,23 @@ where ( &mut self , peer: &PeerId , request: &Request - , response: api::v1::light::Response + , response: Response ) -> Result, Error> { log::trace!("response from {}", peer); + match response { + Response::Light(r) => self.on_response_light(peer, request, r), + Response::Block(r) => self.on_response_block(peer, request, r), + } + } + + fn on_response_light + ( &mut self + , peer: &PeerId + , request: &Request + , response: api::v1::light::Response + ) -> Result, Error> + { use api::v1::light::response::Response; match response.response { Some(Response::RemoteCallResponse(response)) => @@ -418,6 +499,32 @@ where } } + fn on_response_block + ( &mut self + , peer: &PeerId + , request: &Request + , response: api::v1::BlockResponse + ) -> Result, Error> + { + let request = if let Request::Body { request , .. 
} = &request { + request + } else { + return Err(Error::UnexpectedResponse); + }; + + let body: Vec<_> = match response.blocks.into_iter().next() { + Some(b) => b.body, + None => return Err(Error::UnexpectedResponse), + }; + + let body = body.into_iter() + .map(|mut extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) + .collect::>()?; + + let body = self.checker.check_body_proof(&request, body)?; + Ok(Reply::Extrinsics(body)) + } + fn on_remote_call_request ( &mut self , peer: &PeerId @@ -504,7 +611,7 @@ where log::trace!("remote read child request from {} ({} {} at {:?})", peer, - request.storage_key.to_hex::(), + HexDisplay::from(&request.storage_key), fmt_keys(request.keys.first(), request.keys.last()), request.block); @@ -522,7 +629,7 @@ where Err(error) => { log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", peer, - request.storage_key.to_hex::(), + HexDisplay::from(&request.storage_key), fmt_keys(request.keys.first(), request.keys.last()), request.block, error); @@ -532,7 +639,7 @@ where } else { log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", peer, - request.storage_key.to_hex::(), + HexDisplay::from(&request.storage_key), fmt_keys(request.keys.first(), request.keys.last()), request.block, "invalid child info and type" @@ -585,9 +692,9 @@ where log::trace!("remote changes proof request from {} for key {} ({:?}..{:?})", peer, if !request.storage_key.is_empty() { - format!("{} : {}", request.storage_key.to_hex::(), request.key.to_hex::()) + format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&request.key)) } else { - request.key.to_hex::() + HexDisplay::from(&request.key).to_string() }, request.first, request.last); @@ -610,9 +717,9 @@ where log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", peer, if let Some(sk) = storage_key { - format!("{} : {}", sk.0.to_hex::(), key.0.to_hex::()) + format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0)) } else { - key.0.to_hex::() + HexDisplay::from(&key.0).to_string() }, request.first, request.last, @@ -652,41 +759,71 @@ where fn new_handler(&mut self) -> Self::ProtocolsHandler { let p = InboundProtocol { - max_data_size: self.config.max_data_size, - protocol: self.config.protocol.clone(), + max_request_size: self.config.max_request_size, + protocol: self.config.light_protocol.clone(), }; - OneShotHandler::new(SubstreamProtocol::new(p), self.config.inactivity_timeout) + let mut cfg = OneShotHandlerConfig::default(); + cfg.inactive_timeout = self.config.inactivity_timeout; + OneShotHandler::new(SubstreamProtocol::new(p), cfg) } fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { self.peers.get(peer) - .map(|info| vec![info.address.clone()]) + .map(|info| info.connections.iter().map(|(_, a)| a.clone()).collect()) .unwrap_or_default() } - fn inject_connected(&mut self, peer: PeerId, info: ConnectedPoint) { + fn inject_connected(&mut self, peer: &PeerId) { + } + + fn inject_connection_established(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { let peer_address = match info { - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - ConnectedPoint::Dialer { address } => address + ConnectedPoint::Listener { send_back_addr, .. 
} => send_back_addr.clone(), + ConnectedPoint::Dialer { address } => address.clone() }; log::trace!("peer {} connected with address {}", peer, peer_address); - let info = PeerInfo { - address: peer_address, - best_block: None, - status: PeerStatus::Idle, - }; - - self.peers.insert(peer, info); + let entry = self.peers.entry(peer.clone()).or_default(); + entry.connections.push((*conn, peer_address)); } - fn inject_disconnected(&mut self, peer: &PeerId, _: ConnectedPoint) { + fn inject_disconnected(&mut self, peer: &PeerId) { log::trace!("peer {} disconnected", peer); self.remove_peer(peer) } - fn inject_node_event(&mut self, peer: PeerId, event: Event) { + fn inject_connection_closed(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { + let peer_address = match info { + ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, + ConnectedPoint::Dialer { address } => address + }; + + log::trace!("connection to peer {} closed: {}", peer, peer_address); + + if let Some(info) = self.peers.get_mut(peer) { + info.connections.retain(|(c, _)| c != conn) + } + + // Add any outstanding requests on the closed connection back to the + // pending requests. + if let Some(id) = self.outstanding.iter() + .find(|(_, rw)| &rw.peer == peer && rw.connection == Some(*conn)) // (*) + .map(|(id, _)| *id) + { + let rw = self.outstanding.remove(&id).expect("by (*)"); + let rw = RequestWrapper { + timestamp: rw.timestamp, + retries: rw.retries, + request: rw.request, + peer: (), // need to find another peer + connection: None, + }; + self.pending_requests.push_back(rw); + } + } + + fn inject_event(&mut self, peer: PeerId, conn: ConnectionId, event: Event) { match event { // An incoming request from remote has been received. Event::Request(request, mut stream) => { @@ -732,9 +869,10 @@ where // A response to one of our own requests has been received. Event::Response(id, response) => { if let Some(request) = self.outstanding.remove(&id) { - // We first just check if the response originates from the expected peer. + // We first just check if the response originates from the expected peer + // and connection. 
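The connection bookkeeping introduced above keeps several `(ConnectionId, Multiaddr)` pairs per peer and moves a request back to the pending queue when the connection it was sent on closes. Below is a simplified sketch of that idea using only standard-library stand-ins; the `Tracker` type, `u64` ids and `String` peers/addresses are all hypothetical.

```rust
use std::collections::{HashMap, VecDeque};

// Simplified stand-ins for the real ConnectionId / PeerId types.
type ConnId = u64;
type PeerId = String;

#[derive(Default)]
struct PeerEntry {
    /// All currently open connections to this peer and their addresses.
    connections: Vec<(ConnId, String)>,
}

struct Tracker {
    peers: HashMap<PeerId, PeerEntry>,
    /// Requests currently in flight, keyed by request id.
    outstanding: HashMap<u64, (PeerId, ConnId)>,
    /// Requests waiting to be (re)assigned to a peer.
    pending: VecDeque<u64>,
}

impl Tracker {
    fn connection_established(&mut self, peer: &PeerId, conn: ConnId, addr: String) {
        self.peers.entry(peer.clone()).or_default().connections.push((conn, addr));
    }

    /// Drop the closed connection and move any request that was using it
    /// back into the pending queue so another peer/connection can be picked.
    fn connection_closed(&mut self, peer: &PeerId, conn: ConnId) {
        if let Some(entry) = self.peers.get_mut(peer) {
            entry.connections.retain(|(c, _)| *c != conn);
        }
        let orphaned: Vec<u64> = self.outstanding.iter()
            .filter(|(_, (p, c))| p == peer && *c == conn)
            .map(|(id, _)| *id)
            .collect();
        for id in orphaned {
            self.outstanding.remove(&id);
            self.pending.push_back(id);
        }
    }
}

fn main() {
    let mut t = Tracker {
        peers: HashMap::new(),
        outstanding: HashMap::new(),
        pending: VecDeque::new(),
    };
    t.connection_established(&"peer-a".to_string(), 1, "/memory/1".to_string());
    t.outstanding.insert(7, ("peer-a".to_string(), 1));
    t.connection_closed(&"peer-a".to_string(), 1);
    assert!(t.outstanding.is_empty());
    assert_eq!(t.pending.front(), Some(&7));
}
```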
if request.peer != peer { - log::debug!("was expecting response from {} instead of {}", request.peer, peer); + log::debug!("Expected response from {} instead of {}.", request.peer, peer); self.outstanding.insert(id, request); self.remove_peer(&peer); self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); @@ -766,6 +904,7 @@ where retries: request.retries, request: request.request, peer: (), + connection: None, }; self.pending_requests.push_back(rw); } @@ -779,6 +918,7 @@ where retries: request.retries - 1, request: request.request, peer: (), + connection: None, }; self.pending_requests.push_back(rw) } else { @@ -818,44 +958,54 @@ where request.timestamp = Instant::now(); request.retries -= 1 } - let number = required_block(&request.request); - let available_peer = { - let p = self.idle_peers_with_block(number).next(); - if p.is_none() { - self.idle_peers_with_unknown_block().next() - } else { - p + + + match self.prepare_request(request) { + Err(request) => { + self.pending_requests.push_front(request); + log::debug!("no peer available to send request to"); + break } - }; - if let Some(peer) = available_peer { - let rq = serialize_request(&request.request); - let mut buf = Vec::with_capacity(rq.encoded_len()); - if let Err(e) = rq.encode(&mut buf) { - log::debug!("failed to serialize request: {}", e); - send_reply(Err(ClientError::RemoteFetchFailed), request.request) - } else { - let id = self.next_request_id(); - log::trace!("sending request {} to peer {}", id, peer); - let protocol = OutboundProtocol { - request: buf, - request_id: id, - max_data_size: self.config.max_data_size, - protocol: self.config.protocol.clone(), + Ok((peer, request)) => { + let request_bytes = match serialize_request(&request.request) { + Ok(bytes) => bytes, + Err(error) => { + log::debug!("failed to serialize request: {}", error); + send_reply(Err(ClientError::RemoteFetchFailed), request.request); + continue + } }; - self.peers.get_mut(&peer).map(|info| info.status = PeerStatus::BusyWith(id)); - let rw = RequestWrapper { - timestamp: request.timestamp, - retries: request.retries, - request: request.request, - peer: peer.clone(), + + let (expected, protocol) = match request.request { + Request::Body { .. } => + (ExpectedResponseTy::Block, self.config.block_protocol.clone()), + _ => + (ExpectedResponseTy::Light, self.config.light_protocol.clone()), }; - self.outstanding.insert(id, rw); - return Poll::Ready(NetworkBehaviourAction::SendEvent { peer_id: peer, event: protocol }) + + let peer_id = peer.clone(); + let handler = request.connection.map_or(NotifyHandler::Any, NotifyHandler::One); + + let request_id = self.next_request_id(); + self.peers.get_mut(&peer).map(|p| p.status = PeerStatus::BusyWith(request_id)); + self.outstanding.insert(request_id, request); + + let event = OutboundProtocol { + request_id, + request: request_bytes, + expected, + max_response_size: self.config.max_response_size, + protocol, + }; + + log::trace!("sending request {} to peer {}", request_id, peer_id); + + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }) } - } else { - self.pending_requests.push_front(request); - log::debug!("no peer available to send request to"); - break } } @@ -881,6 +1031,7 @@ where retries: rw.retries - 1, request: rw.request, peer: (), + connection: None, }; self.pending_requests.push_back(rw) } @@ -892,6 +1043,7 @@ where fn required_block(request: &Request) -> NumberFor { match request { + Request::Body { request, .. 
} => *request.header.number(), Request::Header { request, .. } => request.block, Request::Read { request, .. } => *request.header.number(), Request::ReadChild { request, .. } => *request.header.number(), @@ -902,6 +1054,7 @@ fn required_block(request: &Request) -> NumberFor { fn retries(request: &Request) -> usize { let rc = match request { + Request::Body { request, .. } => request.retry_count, Request::Header { request, .. } => request.retry_count, Request::Read { request, .. } => request.retry_count, Request::ReadChild { request, .. } => request.retry_count, @@ -911,8 +1064,20 @@ fn retries(request: &Request) -> usize { rc.unwrap_or(0) } -fn serialize_request(request: &Request) -> api::v1::light::Request { +fn serialize_request(request: &Request) -> Result, prost::EncodeError> { let request = match request { + Request::Body { request, .. } => { + let rq = api::v1::BlockRequest { + fields: u32::from(BlockAttributes::BODY.bits()), + from_block: Some(api::v1::block_request::FromBlock::Hash(request.header.hash().encode())), + to_block: Vec::new(), + direction: api::v1::Direction::Ascending as i32, + max_blocks: 1, + }; + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + return Ok(buf); + } Request::Header { request, .. } => { let r = api::v1::light::RemoteHeaderRequest { block: request.block.encode() }; api::v1::light::request::Request::RemoteHeaderRequest(r) @@ -955,7 +1120,10 @@ fn serialize_request(request: &Request) -> api::v1::light::Request } }; - api::v1::light::Request { request: Some(request) } + let rq = api::v1::light::Request { request: Some(request) }; + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + Ok(buf) } fn send_reply(result: Result, ClientError>, request: Request) { @@ -963,6 +1131,11 @@ fn send_reply(result: Result, ClientError>, request: Request< let _ = sender.send(item); // It is okay if the other end already hung up. } match request { + Request::Body { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), + } Request::Header { request, sender } => match result { Err(e) => send(Err(e), sender), Ok(Reply::Header(x)) => send(Ok(x), sender), @@ -997,7 +1170,16 @@ pub enum Event { /// Incoming request from remote and substream to use for the response. Request(api::v1::light::Request, T), /// Incoming response from remote. - Response(u64, api::v1::light::Response), + Response(RequestId, Response), +} + +/// Incoming response from remote. +#[derive(Debug, Clone)] +pub enum Response { + /// Incoming light response from remote. + Light(api::v1::light::Response), + /// Incoming block response from remote. + Block(api::v1::BlockResponse), } /// Substream upgrade protocol. @@ -1006,31 +1188,31 @@ pub enum Event { #[derive(Debug, Clone)] pub struct InboundProtocol { /// The max. request length in bytes. - max_data_size: usize, + max_request_size: usize, /// The protocol to use for upgrade negotiation. 
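`serialize_request` now returns raw bytes so that light and block requests can share one send path. Below is a hedged sketch of the `encoded_len()`/`encode()` buffer pattern it relies on, assuming only the public `prost` crate (which these `api::v1` messages are generated with); `SampleRequest` is a hypothetical stand-in for a generated message type.

```rust
use prost::Message;

/// Hypothetical stand-in for a generated protobuf message such as
/// `api::v1::BlockRequest`; it only exists to keep the sketch self-contained.
#[derive(Clone, PartialEq, Message)]
struct SampleRequest {
    #[prost(uint32, tag = "1")]
    fields: u32,
    #[prost(uint32, tag = "2")]
    max_blocks: u32,
}

/// Encode any prost message into a buffer sized up front, mirroring the
/// `encoded_len()` + `encode()` pattern used by `serialize_request`.
fn to_bytes<M: Message>(msg: &M) -> Result<Vec<u8>, prost::EncodeError> {
    let mut buf = Vec::with_capacity(msg.encoded_len());
    msg.encode(&mut buf)?;
    Ok(buf)
}

fn main() -> Result<(), prost::EncodeError> {
    let rq = SampleRequest { fields: 1, max_blocks: 1 };
    let bytes = to_bytes(&rq)?;
    assert!(!bytes.is_empty());
    Ok(())
}
```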
protocol: Bytes, } impl UpgradeInfo for InboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; + type Info = Bytes; + type InfoIter = iter::Once; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol.clone()) + } } impl InboundUpgrade for InboundProtocol where T: AsyncRead + AsyncWrite + Unpin + Send + 'static { - type Output = Event; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; + type Output = Event; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; - fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { + fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { let future = async move { - let vec = read_one(&mut s, self.max_data_size).await?; + let vec = read_one(&mut s, self.max_request_size).await?; match api::v1::light::Request::decode(&vec[..]) { Ok(r) => Ok(Event::Request(r, s)), Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) @@ -1048,39 +1230,60 @@ pub struct OutboundProtocol { /// The serialized protobuf request. request: Vec, /// Local identifier for the request. Used to associate it with a response. - request_id: u64, - /// The max. request length in bytes. - max_data_size: usize, + request_id: RequestId, + /// Kind of response expected for this request. + expected: ExpectedResponseTy, + /// The max. response length in bytes. + max_response_size: usize, /// The protocol to use for upgrade negotiation. protocol: Bytes, } +/// Type of response expected from the remote for this request. +#[derive(Debug, Clone)] +enum ExpectedResponseTy { + Light, + Block, +} + impl UpgradeInfo for OutboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; + type Info = Bytes; + type InfoIter = iter::Once; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol.clone()) + } } impl OutboundUpgrade for OutboundProtocol where T: AsyncRead + AsyncWrite + Unpin + Send + 'static { - type Output = Event; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; + type Output = Event; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; - fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { + fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { let future = async move { write_one(&mut s, &self.request).await?; - let vec = read_one(&mut s, self.max_data_size).await?; - api::v1::light::Response::decode(&vec[..]) - .map(|r| Event::Response(self.request_id, r)) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) + let vec = read_one(&mut s, self.max_response_size).await?; + + match self.expected { + ExpectedResponseTy::Light => { + api::v1::light::Response::decode(&vec[..]) + .map(|r| Event::Response(self.request_id, Response::Light(r))) + .map_err(|e| { + ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) + }) + }, + ExpectedResponseTy::Block => { + api::v1::BlockResponse::decode(&vec[..]) + .map(|r| Event::Response(self.request_id, Response::Block(r))) + .map_err(|e| { + ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) + }) + } + } }; future.boxed() } @@ -1089,9 +1292,9 @@ where fn fmt_keys(first: Option<&Vec>, last: Option<&Vec>) -> String { if let (Some(first), Some(last)) = (first, last) { if first == last { - first.to_hex::() + HexDisplay::from(first).to_string() } 
else { - format!("{}..{}", first.to_hex::(), last.to_hex::()) + format!("{}..{}", HexDisplay::from(first), HexDisplay::from(last)) } } else { String::from("n/a") @@ -1106,7 +1309,7 @@ mod tests { use crate::{ chain::Client, config::ProtocolId, - protocol::{api, light_dispatch::tests::{DummyFetchChecker, dummy_header}} + protocol::api, }; use futures::{channel::oneshot, prelude::*}; use libp2p::{ @@ -1114,6 +1317,7 @@ mod tests { Multiaddr, core::{ ConnectedPoint, + connection::ConnectionId, identity, muxing::{StreamMuxerBox, SubstreamRef}, transport::{Transport, boxed::Boxed, memory::MemoryTransport}, @@ -1128,15 +1332,15 @@ mod tests { use sp_blockchain::{Error as ClientError}; use sp_core::storage::ChildInfo; use std::{ - collections::HashSet, + collections::{HashMap, HashSet}, io, iter::{self, FromIterator}, pin::Pin, sync::Arc, task::{Context, Poll} }; - use sp_runtime::{generic::Header, traits::BlakeTwo256}; - use super::{Event, LightClientHandler, Request, OutboundProtocol, PeerStatus}; + use sp_runtime::{generic::Header, traits::{BlakeTwo256, Block as BlockT, NumberFor}}; + use super::{Event, LightClientHandler, Request, Response, OutboundProtocol, PeerStatus}; use void::Void; const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"foobarbaz"); @@ -1151,7 +1355,7 @@ mod tests { fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { let client = Arc::new(substrate_test_runtime_client::new()); - let checker = Arc::new(DummyFetchChecker::new(ok)); + let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); let id_key = identity::Keypair::generate_ed25519(); let dh_key = Keypair::::new().into_authentic(&id_key).unwrap(); let local_peer = id_key.public().into_peer_id(); @@ -1165,10 +1369,104 @@ mod tests { Swarm::new(transport, LightClientHandler::new(cf, client, checker, ps), local_peer) } + struct DummyFetchChecker { + ok: bool, + _mark: std::marker::PhantomData + } + + impl fetcher::FetchChecker for DummyFetchChecker { + fn check_header_proof( + &self, + _request: &fetcher::RemoteHeaderRequest, + header: Option, + _remote_proof: fetcher::StorageProof, + ) -> Result { + match self.ok { + true if header.is_some() => Ok(header.unwrap()), + _ => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_proof( + &self, + request: &fetcher::RemoteReadRequest, + _: fetcher::StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request.keys + .iter() + .cloned() + .map(|k| (k, Some(vec![42]))) + .collect() + ), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_child_proof( + &self, + request: &fetcher::RemoteReadChildRequest, + _: fetcher::StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request.keys + .iter() + .cloned() + .map(|k| (k, Some(vec![42]))) + .collect() + ), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_execution_proof( + &self, + _: &fetcher::RemoteCallRequest, + _: fetcher::StorageProof, + ) -> Result, ClientError> { + match self.ok { + true => Ok(vec![42]), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_changes_proof( + &self, + _: &fetcher::RemoteChangesRequest, + _: fetcher::ChangesProof + ) -> Result, u32)>, ClientError> { + match self.ok { + true => Ok(vec![(100.into(), 2)]), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_body_proof( + &self, + _: &fetcher::RemoteBodyRequest, + body: Vec + ) -> 
Result, ClientError> { + match self.ok { + true => Ok(body), + false => Err(ClientError::Backend("Test error".into())), + } + } + } + fn make_config() -> super::Config { super::Config::new(&ProtocolId::from(&b"foo"[..])) } + fn dummy_header() -> sp_test_primitives::Header { + sp_test_primitives::Header { + parent_hash: Default::default(), + number: 0, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + struct EmptyPollParams(PeerId); impl PollParameters for EmptyPollParams { @@ -1199,7 +1497,7 @@ mod tests { out_peers: 128, bootnodes: Vec::new(), reserved_only: false, - reserved_nodes: Vec::new(), + priority_groups: Vec::new(), }; sc_peerset::Peerset::from_config(cfg) } @@ -1211,7 +1509,7 @@ mod tests { ) -> LightClientHandler { let client = Arc::new(substrate_test_runtime_client::new()); - let checker = Arc::new(DummyFetchChecker::new(ok)); + let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); LightClientHandler::new(cf, client, checker, ps) } @@ -1233,10 +1531,12 @@ mod tests { let pset = peerset(); let mut behaviour = make_behaviour(true, pset.1, make_config()); - behaviour.inject_connected(peer.clone(), empty_dialer()); + behaviour.inject_connection_established(&peer, &ConnectionId::new(1), &empty_dialer()); + behaviour.inject_connected(&peer); assert_eq!(1, behaviour.peers.len()); - behaviour.inject_disconnected(&peer, empty_dialer()); + behaviour.inject_connection_closed(&peer, &ConnectionId::new(1), &empty_dialer()); + behaviour.inject_disconnected(&peer); assert_eq!(0, behaviour.peers.len()) } @@ -1247,8 +1547,10 @@ mod tests { let pset = peerset(); let mut behaviour = make_behaviour(true, pset.1, make_config()); - behaviour.inject_connected(peer0.clone(), empty_dialer()); - behaviour.inject_connected(peer1.clone(), empty_dialer()); + behaviour.inject_connection_established(&peer0, &ConnectionId::new(1), &empty_dialer()); + behaviour.inject_connected(&peer0); + behaviour.inject_connection_established(&peer1, &ConnectionId::new(2), &empty_dialer()); + behaviour.inject_connected(&peer1); // We now know about two peers. assert_eq!(HashSet::from_iter(&[peer0.clone(), peer1.clone()]), behaviour.peers.keys().collect::>()); @@ -1270,7 +1572,7 @@ mod tests { assert_eq!(1, behaviour.pending_requests.len()); // The behaviour should now attempt to send the request. - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::SendEvent { peer_id, .. }) => { + assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, .. }) => { assert!(peer_id == peer0 || peer_id == peer1) }); @@ -1310,7 +1612,9 @@ mod tests { let mut behaviour = make_behaviour(false, pset.1, make_config()); // ^--- Making sure the response data check fails. 
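The `DummyFetchChecker { ok, .. }` used in these tests is a plain test double: one flag decides whether every proof check returns canned data or an error. A generic version of that pattern, with a hypothetical `ProofChecker` trait standing in for the real fetcher traits:

```rust
use std::collections::HashMap;

/// Hypothetical, simplified stand-in for the fetcher's proof-checking trait.
trait ProofChecker {
    fn check_read(&self, keys: &[Vec<u8>]) -> Result<HashMap<Vec<u8>, Option<Vec<u8>>>, String>;
}

/// Test double: the `ok` flag decides whether every check succeeds or fails,
/// mirroring the `DummyFetchChecker { ok, .. }` pattern in the tests above.
struct DummyChecker {
    ok: bool,
}

impl ProofChecker for DummyChecker {
    fn check_read(&self, keys: &[Vec<u8>]) -> Result<HashMap<Vec<u8>, Option<Vec<u8>>>, String> {
        if self.ok {
            Ok(keys.iter().cloned().map(|k| (k, Some(vec![42]))).collect())
        } else {
            Err("Test error".to_string())
        }
    }
}

fn main() {
    let good = DummyChecker { ok: true };
    let bad = DummyChecker { ok: false };
    let keys = vec![b":key".to_vec()];
    assert_eq!(good.check_read(&keys).unwrap().get(b":key".as_ref()), Some(&Some(vec![42])));
    assert!(bad.check_read(&keys).is_err());
}
```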
- behaviour.inject_connected(peer.clone(), empty_dialer()); + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); assert_eq!(1, behaviour.peers.len()); let chan = oneshot::channel(); @@ -1338,7 +1642,7 @@ mod tests { } }; - behaviour.inject_node_event(peer.clone(), Event::Response(request_id, response)); + behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); assert!(behaviour.peers.is_empty()); poll(&mut behaviour); // More progress @@ -1354,7 +1658,9 @@ mod tests { let pset = peerset(); let mut behaviour = make_behaviour(true, pset.1, make_config()); - behaviour.inject_connected(peer.clone(), empty_dialer()); + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); assert_eq!(1, behaviour.peers.len()); assert_eq!(0, behaviour.pending_requests.len()); assert_eq!(0, behaviour.outstanding.len()); @@ -1367,7 +1673,7 @@ mod tests { } }; - behaviour.inject_node_event(peer.clone(), Event::Response(2347895932, response)); + behaviour.inject_event(peer.clone(), conn, Event::Response(2347895932, Response::Light(response))); assert!(behaviour.peers.is_empty()); poll(&mut behaviour); @@ -1381,7 +1687,9 @@ mod tests { let pset = peerset(); let mut behaviour = make_behaviour(true, pset.1, make_config()); - behaviour.inject_connected(peer.clone(), empty_dialer()); + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); assert_eq!(1, behaviour.peers.len()); let chan = oneshot::channel(); @@ -1409,7 +1717,7 @@ mod tests { } }; - behaviour.inject_node_event(peer.clone(), Event::Response(request_id, response)); + behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); assert!(behaviour.peers.is_empty()); poll(&mut behaviour); // More progress @@ -1429,10 +1737,18 @@ mod tests { let mut behaviour = make_behaviour(false, pset.1, make_config()); // ^--- Making sure the response data check fails. - behaviour.inject_connected(peer1.clone(), empty_dialer()); - behaviour.inject_connected(peer2.clone(), empty_dialer()); - behaviour.inject_connected(peer3.clone(), empty_dialer()); - behaviour.inject_connected(peer4.clone(), empty_dialer()); + let conn1 = ConnectionId::new(1); + behaviour.inject_connection_established(&peer1, &conn1, &empty_dialer()); + behaviour.inject_connected(&peer1); + let conn2 = ConnectionId::new(2); + behaviour.inject_connection_established(&peer2, &conn2, &empty_dialer()); + behaviour.inject_connected(&peer2); + let conn3 = ConnectionId::new(3); + behaviour.inject_connection_established(&peer3, &conn3, &empty_dialer()); + behaviour.inject_connected(&peer3); + let conn4 = ConnectionId::new(3); + behaviour.inject_connection_established(&peer4, &conn4, &empty_dialer()); + behaviour.inject_connected(&peer4); assert_eq!(4, behaviour.peers.len()); let mut chan = oneshot::channel(); @@ -1447,11 +1763,11 @@ mod tests { assert_eq!(1, behaviour.pending_requests.len()); assert_eq!(0, behaviour.outstanding.len()); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::SendEvent { .. })); + assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })); assert_eq!(0, behaviour.pending_requests.len()); assert_eq!(1, behaviour.outstanding.len()); - for _ in 0 .. 
3 { + for i in 1 ..= 3 { // Construct an invalid response let request_id = *behaviour.outstanding.keys().next().unwrap(); let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); @@ -1461,8 +1777,9 @@ mod tests { response: Some(api::v1::light::response::Response::RemoteCallResponse(r)) } }; - behaviour.inject_node_event(responding_peer, Event::Response(request_id, response.clone())); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::SendEvent { .. })); + let conn = ConnectionId::new(i); + behaviour.inject_event(responding_peer, conn, Event::Response(request_id, Response::Light(response.clone()))); + assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })); assert_matches!(chan.1.try_recv(), Ok(None)) } // Final invalid response @@ -1474,7 +1791,7 @@ mod tests { response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), } }; - behaviour.inject_node_event(responding_peer, Event::Response(request_id, response)); + behaviour.inject_event(responding_peer, conn4, Event::Response(request_id, Response::Light(response))); assert_matches!(poll(&mut behaviour), Poll::Pending); assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) } @@ -1484,10 +1801,13 @@ mod tests { let pset = peerset(); let mut behaviour = make_behaviour(true, pset.1, make_config()); - behaviour.inject_connected(peer.clone(), empty_dialer()); + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); assert_eq!(1, behaviour.peers.len()); let response = match request { + Request::Body { .. } => unimplemented!(), Request::Header{..} => { let r = api::v1::light::RemoteHeaderResponse { header: dummy_header().encode(), @@ -1532,12 +1852,12 @@ mod tests { assert_eq!(1, behaviour.pending_requests.len()); assert_eq!(0, behaviour.outstanding.len()); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::SendEvent { .. })); + assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })); assert_eq!(0, behaviour.pending_requests.len()); assert_eq!(1, behaviour.outstanding.len()); assert_eq!(1, *behaviour.outstanding.keys().next().unwrap()); - behaviour.inject_node_event(peer.clone(), Event::Response(1, response)); + behaviour.inject_event(peer.clone(), conn, Event::Response(1, Response::Light(response))); poll(&mut behaviour); diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs deleted file mode 100644 index 8146172e1590868a2dcbd11de747701d7abbe8d7..0000000000000000000000000000000000000000 --- a/client/network/src/protocol/light_dispatch.rs +++ /dev/null @@ -1,1313 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Light client requests service. -//! -//! 
Handles requests for data coming from our local light client and that must be answered by -//! nodes on the network. - -use std::collections::{HashMap, VecDeque}; -use std::sync::Arc; -use std::time::Duration; -use wasm_timer::Instant; -use log::{trace, info}; -use futures::channel::oneshot::{Sender as OneShotSender}; -use linked_hash_map::{Entry, LinkedHashMap}; -use sp_blockchain::Error as ClientError; -use sc_client_api::{FetchChecker, RemoteHeaderRequest, - RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof, - RemoteReadChildRequest, RemoteBodyRequest, StorageProof}; -use crate::protocol::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; -use libp2p::PeerId; -use crate::config::Roles; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sc_peerset::ReputationChange; - -/// Remote request timeout. -const REQUEST_TIMEOUT: Duration = Duration::from_secs(15); -/// Default request retry count. -const RETRY_COUNT: usize = 1; -/// Reputation change for a peer when a request timed out. -pub(crate) const TIMEOUT_REPUTATION_CHANGE: i32 = -(1 << 8); - -/// Trait used by the `LightDispatch` service to communicate messages back to the network. -pub trait LightDispatchNetwork { - /// Adjusts the reputation of the given peer. - fn report_peer(&mut self, who: &PeerId, reputation_change: ReputationChange); - - /// Disconnect from the given peer. Used in case of misbehaviour. - fn disconnect_peer(&mut self, who: &PeerId); - - /// Send to `who` a request for a header. - fn send_header_request(&mut self, who: &PeerId, id: RequestId, block: <::Header as HeaderT>::Number); - - /// Send to `who` a read request. - fn send_read_request( - &mut self, - who: &PeerId, - id: RequestId, - block: ::Hash, - keys: Vec>, - ); - - /// Send to `who` a child read request. - fn send_read_child_request( - &mut self, - who: &PeerId, - id: RequestId, - block: ::Hash, - storage_key: Vec, - child_info: Vec, - child_type: u32, - keys: Vec>, - ); - - /// Send to `who` a call request. - fn send_call_request( - &mut self, - who: &PeerId, - id: RequestId, - block: ::Hash, - method: String, - data: Vec - ); - - /// Send to `who` a changes request. - fn send_changes_request( - &mut self, - who: &PeerId, - id: RequestId, - first: ::Hash, - last: ::Hash, - min: ::Hash, - max: ::Hash, - storage_key: Option>, - key: Vec, - ); - - /// Send to `who` a body request. - fn send_body_request( - &mut self, - who: &PeerId, - id: RequestId, - fields: BlockAttributes, - from: FromBlock<::Hash, <::Header as HeaderT>::Number>, - to: Option, - direction: Direction, - max: Option - ); -} - -/// Light client requests service. Dispatches requests to appropriate peers. -pub struct LightDispatch { - /// Verifies that responses are correct. Passed at initialization. - checker: Arc>, - /// Numeric ID to assign to the next outgoing request. Used to assign responses to their - /// corresponding request. - next_request_id: u64, - /// Requests that we have yet to send out on the network. - pending_requests: VecDeque>, - /// List of nodes to which we have sent a request and that are yet to answer. - active_peers: LinkedHashMap>, - /// List of nodes that we know of that aren't doing anything and that are available for new - /// requests. - idle_peers: VecDeque, - /// Best known block for each node in `active_peers` and `idle_peers`. - best_blocks: HashMap>, -} - -struct Request { - id: u64, - /// When the request got created or sent out to the network. 
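The removed `LightDispatch` keeps in-flight requests in insertion order and, in `maintain_peers` further down, moves anything older than `REQUEST_TIMEOUT` back to the pending queue. A rough sketch of that scan with standard-library types only; the `Active` struct and queue shapes are stand-ins, while the 15-second constant matches the code below.

```rust
use std::collections::VecDeque;
use std::time::{Duration, Instant};

const REQUEST_TIMEOUT: Duration = Duration::from_secs(15);

/// Stand-in for an in-flight request: just its id and when it was sent.
struct Active {
    id: u64,
    timestamp: Instant,
}

/// Move every request that has been in flight longer than `REQUEST_TIMEOUT`
/// back to the pending queue. `active` is ordered oldest-first, so only the
/// front needs to be examined, as `maintain_peers` below does.
fn maintain(active: &mut VecDeque<Active>, pending: &mut VecDeque<u64>, now: Instant) {
    loop {
        match active.front() {
            Some(front) if now.duration_since(front.timestamp) >= REQUEST_TIMEOUT => (),
            _ => break,
        }
        let timed_out = active.pop_front().expect("front() is Some as checked above");
        pending.push_front(timed_out.id);
    }
}

fn main() {
    let start = Instant::now();
    let mut active = VecDeque::new();
    active.push_back(Active { id: 1, timestamp: start });
    let mut pending = VecDeque::new();
    // Pretend 30 seconds have passed: the request has timed out and is re-queued.
    maintain(&mut active, &mut pending, start + Duration::from_secs(30));
    assert_eq!(pending.front(), Some(&1));
    assert!(active.is_empty());
}
```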
- timestamp: Instant, - /// Number of remaining attempts to fulfill this request. If it reaches 0, we interrupt the - /// attempt. - retry_count: usize, - data: RequestData, -} - -/// One request for data made by the `Client`. -/// -/// Contains a `Sender` where to send the result. -pub(crate) enum RequestData { - RemoteBody(RemoteBodyRequest, OneShotSender, ClientError>>), - RemoteHeader(RemoteHeaderRequest, OneShotSender>), - RemoteRead( - RemoteReadRequest, - OneShotSender, Option>>, ClientError>>, - ), - RemoteReadChild( - RemoteReadChildRequest, - OneShotSender, Option>>, ClientError>> - ), - RemoteCall(RemoteCallRequest, OneShotSender, ClientError>>), - RemoteChanges( - RemoteChangesRequest, - OneShotSender, u32)>, ClientError>> - ), -} - -enum Accept { - Ok, - CheckFailed(ClientError, RequestData), - Unexpected(RequestData), -} - -/// Dummy implementation of `FetchChecker` that always assumes that responses are bad. -/// -/// Considering that it is the responsibility of the client to build the fetcher, it can use this -/// implementation if it knows that it will never perform any request. -#[derive(Default, Clone)] -pub struct AlwaysBadChecker; - -impl FetchChecker for AlwaysBadChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest, - _remote_header: Option, - _remote_proof: StorageProof, - ) -> Result { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_read_proof( - &self, - _request: &RemoteReadRequest, - _remote_proof: StorageProof, - ) -> Result,Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_read_child_proof( - &self, - _request: &RemoteReadChildRequest, - _remote_proof: StorageProof, - ) -> Result, Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_execution_proof( - &self, - _request: &RemoteCallRequest, - _remote_proof: StorageProof, - ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_changes_proof( - &self, - _request: &RemoteChangesRequest, - _remote_proof: ChangesProof - ) -> Result, u32)>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_body_proof( - &self, - _request: &RemoteBodyRequest, - _body: Vec - ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } -} - -impl LightDispatch where - B::Header: HeaderT, -{ - /// Creates new light client requests processor. - pub fn new(checker: Arc>) -> Self { - LightDispatch { - checker, - next_request_id: 0, - pending_requests: VecDeque::new(), - active_peers: LinkedHashMap::new(), - idle_peers: VecDeque::new(), - best_blocks: HashMap::new(), - } - } - - /// Inserts a new request in the list of requests to execute. - pub(crate) fn add_request(&mut self, network: impl LightDispatchNetwork, data: RequestData) { - self.insert(RETRY_COUNT, data); - self.dispatch(network); - } - - /// Inserts a new request in the list of requests to execute. - fn insert(&mut self, retry_count: usize, data: RequestData) { - let request_id = self.next_request_id; - self.next_request_id += 1; - - self.pending_requests.push_back(Request { - id: request_id, - timestamp: Instant::now(), - retry_count, - data, - }); - } - - /// Try to accept response from given peer. 
- fn accept_response( - &mut self, - rtype: &str, - mut network: impl LightDispatchNetwork, - peer: PeerId, - request_id: u64, - try_accept: impl FnOnce(Request, &Arc>) -> Accept - ) { - let request = match self.remove(peer.clone(), request_id) { - Some(request) => request, - None => { - info!("Invalid remote {} response from peer {}", rtype, peer); - network.report_peer(&peer, ReputationChange::new_fatal("Invalid remote response")); - network.disconnect_peer(&peer); - self.remove_peer(&peer); - return; - }, - }; - - let retry_count = request.retry_count; - let (retry_count, retry_request_data) = match try_accept(request, &self.checker) { - Accept::Ok => (retry_count, None), - Accept::CheckFailed(error, retry_request_data) => { - info!("Failed to check remote {} response from peer {}: {}", rtype, peer, error); - network.report_peer(&peer, ReputationChange::new_fatal("Failed remote response check")); - network.disconnect_peer(&peer); - self.remove_peer(&peer); - - if retry_count > 0 { - (retry_count - 1, Some(retry_request_data)) - } else { - trace!(target: "sync", "Failed to get remote {} response for given number of retries", rtype); - retry_request_data.fail(ClientError::RemoteFetchFailed.into()); - (0, None) - } - }, - Accept::Unexpected(retry_request_data) => { - info!("Unexpected response to remote {} from peer", rtype); - network.report_peer(&peer, ReputationChange::new_fatal("Unexpected remote response")); - network.disconnect_peer(&peer); - self.remove_peer(&peer); - - (retry_count, Some(retry_request_data)) - }, - }; - - if let Some(request_data) = retry_request_data { - self.insert(retry_count, request_data); - } - - self.dispatch(network); - } - - /// Call this when we connect to a node on the network. - pub fn on_connect( - &mut self, - network: impl LightDispatchNetwork, - peer: PeerId, - role: Roles, - best_number: NumberFor - ) { - if !role.is_full() { - return; - } - - self.idle_peers.push_back(peer.clone()); - self.best_blocks.insert(peer, best_number); - - self.dispatch(network); - } - - /// Sets the best seen block for the given node. - pub fn update_best_number(&mut self, network: impl LightDispatchNetwork, peer: PeerId, best_number: NumberFor) { - self.best_blocks.insert(peer, best_number); - self.dispatch(network); - } - - /// Call this when we disconnect from a node. - pub fn on_disconnect(&mut self, network: impl LightDispatchNetwork, peer: &PeerId) { - self.remove_peer(peer); - self.dispatch(network); - } - - /// Must be called periodically in order to perform maintenance. - pub fn maintain_peers(&mut self, mut network: impl LightDispatchNetwork) { - let now = Instant::now(); - - loop { - match self.active_peers.front() { - Some((_, request)) if now - request.timestamp >= REQUEST_TIMEOUT => (), - _ => break, - } - - let (bad_peer, request) = self.active_peers.pop_front().expect("front() is Some as checked above"); - self.pending_requests.push_front(request); - network.report_peer(&bad_peer, ReputationChange::new(TIMEOUT_REPUTATION_CHANGE, "Light request timeout")); - network.disconnect_peer(&bad_peer); - } - - self.dispatch(network); - } - - /// Handles a remote header response message from on the network. 
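Both the new handler above and the removed `accept_response` follow the same failure path: re-queue the request with one retry fewer, or report the failure to whoever waits on the oneshot channel once the retries are used up. A minimal sketch assuming only the `futures` crate; the `Pending` type, queue and error string are illustrative.

```rust
use futures::channel::oneshot;
use std::collections::VecDeque;

/// A queued request: the attempts left and the channel the caller waits on.
struct Pending {
    retries: usize,
    sender: oneshot::Sender<Result<Vec<u8>, String>>,
}

/// On a failed attempt, either re-queue with one retry fewer or tell the
/// caller that the remote fetch failed, mirroring the retry handling above.
fn on_failed_attempt(req: Pending, queue: &mut VecDeque<Pending>) {
    if req.retries > 0 {
        queue.push_back(Pending { retries: req.retries - 1, sender: req.sender });
    } else {
        // It is okay if the receiving side has already been dropped.
        let _ = req.sender.send(Err("remote fetch failed".to_string()));
    }
}

fn main() {
    let (tx, mut rx) = oneshot::channel();
    let mut queue = VecDeque::new();
    on_failed_attempt(Pending { retries: 0, sender: tx }, &mut queue);
    assert!(queue.is_empty());
    // The caller receives the failure instead of a re-queued attempt.
    assert!(rx.try_recv().unwrap().unwrap().is_err());
}
```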
- pub fn on_remote_header_response( - &mut self, - network: impl LightDispatchNetwork, - peer: PeerId, - response: message::RemoteHeaderResponse - ) { - self.accept_response("header", network, peer, response.id, |request, checker| match request.data { - RequestData::RemoteHeader(request, sender) => match checker.check_header_proof( - &request, - response.header, - response.proof - ) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed(error, RequestData::RemoteHeader(request, sender)), - }, - data => Accept::Unexpected(data), - }) - } - - /// Handles a remote read response message from on the network. - pub fn on_remote_read_response( - &mut self, - network: impl LightDispatchNetwork, - peer: PeerId, - response: message::RemoteReadResponse - ) { - self.accept_response("read", network, peer, response.id, |request, checker| match request.data { - RequestData::RemoteRead(request, sender) => { - match checker.check_read_proof(&request, response.proof) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed( - error, - RequestData::RemoteRead(request, sender) - ), - }}, - RequestData::RemoteReadChild(request, sender) => { - match checker.check_read_child_proof(&request, response.proof) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed( - error, - RequestData::RemoteReadChild(request, sender) - ), - }}, - data => Accept::Unexpected(data), - }) - } - - /// Handles a remote call response message from on the network. - pub fn on_remote_call_response( - &mut self, - network: impl LightDispatchNetwork, - peer: PeerId, - response: message::RemoteCallResponse - ) { - self.accept_response("call", network, peer, response.id, |request, checker| match request.data { - RequestData::RemoteCall(request, sender) => match checker.check_execution_proof(&request, response.proof) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed(error, RequestData::RemoteCall(request, sender)), - }, - data => Accept::Unexpected(data), - }) - } - - /// Handles a remote changes response message from on the network. - pub fn on_remote_changes_response( - &mut self, - network: impl LightDispatchNetwork, - peer: PeerId, - response: message::RemoteChangesResponse, B::Hash> - ) { - self.accept_response("changes", network, peer, response.id, |request, checker| match request.data { - RequestData::RemoteChanges(request, sender) => match checker.check_changes_proof( - &request, ChangesProof { - max_block: response.max, - proof: response.proof, - roots: response.roots.into_iter().collect(), - roots_proof: response.roots_proof, - }) { - Ok(response) => { - // we do not bother if receiver has been dropped already - let _ = sender.send(Ok(response)); - Accept::Ok - }, - Err(error) => Accept::CheckFailed(error, RequestData::RemoteChanges(request, sender)), - }, - data => Accept::Unexpected(data), - }) - } - - /// Handles a remote body response message from on the network. 
- pub fn on_remote_body_response( - &mut self, - network: impl LightDispatchNetwork, - peer: PeerId, - response: message::BlockResponse - ) { - self.accept_response("body", network, peer, response.id, |request, checker| match request.data { - RequestData::RemoteBody(request, sender) => { - let mut bodies: Vec<_> = response - .blocks - .into_iter() - .filter_map(|b| b.body) - .collect(); - - // Number of bodies are hardcoded to 1 for valid `RemoteBodyResponses` - if bodies.len() != 1 { - return Accept::CheckFailed( - "RemoteBodyResponse: invalid number of blocks".into(), - RequestData::RemoteBody(request, sender), - ) - } - let body = bodies.remove(0); - - match checker.check_body_proof(&request, body) { - Ok(body) => { - let _ = sender.send(Ok(body)); - Accept::Ok - } - Err(error) => Accept::CheckFailed(error, RequestData::RemoteBody(request, sender)), - } - } - other => Accept::Unexpected(other), - }) - } - - pub fn is_light_response(&self, peer: &PeerId, request_id: message::RequestId) -> bool { - self.active_peers.get(peer).map_or(false, |r| r.id == request_id) - } - - fn remove(&mut self, peer: PeerId, id: u64) -> Option> { - match self.active_peers.entry(peer.clone()) { - Entry::Occupied(entry) => match entry.get().id == id { - true => { - self.idle_peers.push_back(peer); - Some(entry.remove()) - }, - false => None, - }, - Entry::Vacant(_) => None, - } - } - - /// Removes a peer from the list of known peers. - /// - /// Puts back the active request that this node was performing into `pending_requests`. - fn remove_peer(&mut self, peer: &PeerId) { - self.best_blocks.remove(peer); - - if let Some(request) = self.active_peers.remove(peer) { - self.pending_requests.push_front(request); - return; - } - - if let Some(idle_index) = self.idle_peers.iter().position(|i| i == peer) { - self.idle_peers.swap_remove_back(idle_index); - } - } - - /// Dispatches pending requests. 
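Unlike the new handler, which keys `outstanding` by request id, the removed dispatcher tracks at most one active request per peer and only accepts a response whose id matches it; anything else is treated as invalid by the caller. A simplified sketch of that check (the `Active` struct, `String` peer ids and the return shape are stand-ins):

```rust
use std::collections::HashMap;

type PeerId = String;

/// The single request we are currently waiting on from a given peer.
struct Active {
    id: u64,
}

/// Accept a response only if its id matches the request we sent this peer,
/// mirroring the `remove`/`is_light_response` checks above. On a match the
/// peer becomes idle again and the request is handed back for verification.
fn take_matching(
    active: &mut HashMap<PeerId, Active>,
    idle: &mut Vec<PeerId>,
    peer: &PeerId,
    response_id: u64,
) -> Option<Active> {
    let matches = active.get(peer).map_or(false, |req| req.id == response_id);
    if matches {
        idle.push(peer.clone());
        active.remove(peer)
    } else {
        // Unknown peer or unexpected id: the caller penalises the peer.
        None
    }
}

fn main() {
    let mut active = HashMap::new();
    active.insert("peer-a".to_string(), Active { id: 3 });
    let mut idle = Vec::new();
    assert!(take_matching(&mut active, &mut idle, &"peer-a".to_string(), 9).is_none());
    assert!(take_matching(&mut active, &mut idle, &"peer-a".to_string(), 3).is_some());
    assert_eq!(idle, vec!["peer-a".to_string()]);
}
```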
- fn dispatch(&mut self, mut network: impl LightDispatchNetwork) { - let mut last_peer = self.idle_peers.back().cloned(); - let mut unhandled_requests = VecDeque::new(); - - loop { - let peer = match self.idle_peers.pop_front() { - Some(peer) => peer, - None => break, - }; - - // check if request can (optimistically) be processed by the peer - let can_be_processed_by_peer = { - let request = match self.pending_requests.front() { - Some(r) => r, - None => { - self.idle_peers.push_front(peer); - break; - }, - }; - let peer_best_block = self.best_blocks.get(&peer) - .expect("entries are inserted into best_blocks when peer is connected; - entries are removed from best_blocks when peer is disconnected; - peer is in idle_peers and thus connected; qed"); - request.required_block() <= *peer_best_block - }; - - if !can_be_processed_by_peer { - // return peer to the back of the queue - self.idle_peers.push_back(peer.clone()); - - // we have enumerated all peers and no one can handle request - if Some(peer) == last_peer { - let request = self.pending_requests.pop_front().expect("checked in loop condition; qed"); - unhandled_requests.push_back(request); - last_peer = self.idle_peers.back().cloned(); - } - - continue; - } - - last_peer = self.idle_peers.back().cloned(); - - let mut request = self.pending_requests.pop_front().expect("checked in loop condition; qed"); - request.timestamp = Instant::now(); - trace!(target: "sync", "Dispatching remote request {} to peer {}", request.id, peer); - request.send_to(&mut network, &peer); - self.active_peers.insert(peer, request); - } - - self.pending_requests.append(&mut unhandled_requests); - } -} - -impl Request { - /// Returns the block that the remote needs to have in order to be able to fulfill - /// this request. - fn required_block(&self) -> NumberFor { - match self.data { - RequestData::RemoteHeader(ref data, _) => data.block, - RequestData::RemoteRead(ref data, _) => *data.header.number(), - RequestData::RemoteReadChild(ref data, _) => *data.header.number(), - RequestData::RemoteCall(ref data, _) => *data.header.number(), - RequestData::RemoteChanges(ref data, _) => data.max_block.0, - RequestData::RemoteBody(ref data, _) => *data.header.number(), - } - } - - fn send_to(&self, out: &mut impl LightDispatchNetwork, peer: &PeerId) { - match self.data { - RequestData::RemoteHeader(ref data, _) => - out.send_header_request( - peer, - self.id, - data.block, - ), - RequestData::RemoteRead(ref data, _) => - out.send_read_request( - peer, - self.id, - data.block, - data.keys.clone(), - ), - RequestData::RemoteReadChild(ref data, _) => - out.send_read_child_request( - peer, - self.id, - data.block, - data.storage_key.clone(), - data.child_info.clone(), - data.child_type, - data.keys.clone(), - ), - RequestData::RemoteCall(ref data, _) => - out.send_call_request( - peer, - self.id, - data.block, - data.method.clone(), - data.call_data.clone(), - ), - RequestData::RemoteChanges(ref data, _) => - out.send_changes_request( - peer, - self.id, - data.first_block.1.clone(), - data.last_block.1.clone(), - data.tries_roots.1.clone(), - data.max_block.1.clone(), - data.storage_key.clone(), - data.key.clone(), - ), - RequestData::RemoteBody(ref data, _) => - out.send_body_request( - peer, - self.id, - message::BlockAttributes::BODY, - message::FromBlock::Hash(data.header.hash()), - None, - message::Direction::Ascending, - Some(1) - ), - } - } -} - -impl RequestData { - fn fail(self, error: ClientError) { - // don't care if anyone is listening - match self { - 
RequestData::RemoteHeader(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteCall(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteRead(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteReadChild(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteChanges(_, sender) => { let _ = sender.send(Err(error)); }, - RequestData::RemoteBody(_, sender) => { let _ = sender.send(Err(error)); }, - } - } -} - -#[cfg(test)] -pub mod tests { - use std::collections::{HashMap, HashSet}; - use std::sync::Arc; - use std::time::Instant; - use futures::channel::oneshot; - use sp_core::storage::ChildInfo; - use sp_runtime::traits::{Block as BlockT, NumberFor, Header as HeaderT}; - use sp_blockchain::{Error as ClientError, Result as ClientResult}; - use sc_client_api::{FetchChecker, RemoteHeaderRequest, - ChangesProof, RemoteCallRequest, RemoteReadRequest, - RemoteReadChildRequest, RemoteChangesRequest, RemoteBodyRequest}; - use crate::config::Roles; - use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; - use libp2p::PeerId; - use super::{REQUEST_TIMEOUT, LightDispatch, LightDispatchNetwork, RequestData, StorageProof}; - use sp_test_primitives::{Block, Header}; - - pub(crate) struct DummyFetchChecker { - pub(crate) ok: bool, - _mark: std::marker::PhantomData - } - - impl DummyFetchChecker { - pub(crate) fn new(ok: bool) -> Self { - DummyFetchChecker { ok, _mark: std::marker::PhantomData } - } - } - - impl FetchChecker for DummyFetchChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest, - header: Option, - _remote_proof: StorageProof, - ) -> ClientResult { - match self.ok { - true if header.is_some() => Ok(header.unwrap()), - _ => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_proof( - &self, - request: &RemoteReadRequest, - _: StorageProof, - ) -> ClientResult, Option>>> { - match self.ok { - true => Ok(request.keys - .iter() - .cloned() - .map(|k| (k, Some(vec![42]))) - .collect() - ), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - _: StorageProof, - ) -> ClientResult, Option>>> { - match self.ok { - true => Ok(request.keys - .iter() - .cloned() - .map(|k| (k, Some(vec![42]))) - .collect() - ), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_execution_proof( - &self, - _: &RemoteCallRequest, - _: StorageProof, - ) -> ClientResult> { - match self.ok { - true => Ok(vec![42]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_changes_proof( - &self, - _: &RemoteChangesRequest, - _: ChangesProof - ) -> ClientResult, u32)>> { - match self.ok { - true => Ok(vec![(100.into(), 2)]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_body_proof( - &self, - _: &RemoteBodyRequest, - body: Vec - ) -> ClientResult> { - match self.ok { - true => Ok(body), - false => Err(ClientError::Backend("Test error".into())), - } - } - } - - fn dummy(ok: bool) -> LightDispatch { - LightDispatch::new(Arc::new(DummyFetchChecker::new(ok))) - } - - fn total_peers(light_dispatch: &LightDispatch) -> usize { - light_dispatch.idle_peers.len() + light_dispatch.active_peers.len() - } - - fn receive_call_response( - network_interface: impl LightDispatchNetwork, - light_dispatch: &mut LightDispatch, - peer: PeerId, - id: message::RequestId - ) { - 
light_dispatch.on_remote_call_response(network_interface, peer, message::RemoteCallResponse { - id: id, - proof: StorageProof::empty(), - }); - } - - pub(crate) fn dummy_header() -> Header { - Header { - parent_hash: Default::default(), - number: 0, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - #[derive(Default)] - struct DummyNetwork { - disconnected_peers: HashSet, - } - - impl<'a, B: BlockT> LightDispatchNetwork for &'a mut DummyNetwork { - fn report_peer(&mut self, _: &PeerId, _: crate::ReputationChange) {} - fn disconnect_peer(&mut self, who: &PeerId) { - self.disconnected_peers.insert(who.clone()); - } - fn send_header_request(&mut self, _: &PeerId, _: RequestId, _: <::Header as HeaderT>::Number) {} - fn send_read_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec>) {} - fn send_read_child_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: Vec, - _: Vec, _: u32, _: Vec>) {} - fn send_call_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: String, _: Vec) {} - fn send_changes_request(&mut self, _: &PeerId, _: RequestId, _: ::Hash, _: ::Hash, - _: ::Hash, _: ::Hash, _: Option>, _: Vec) {} - fn send_body_request(&mut self, _: &PeerId, _: RequestId, _: BlockAttributes, _: FromBlock<::Hash, - <::Header as HeaderT>::Number>, _: Option, _: Direction, _: Option) {} - } - - fn assert_disconnected_peer(dummy: &DummyNetwork) { - assert_eq!(dummy.disconnected_peers.len(), 1); - } - - #[test] - fn knows_about_peers_roles() { - let mut network_interface = DummyNetwork::default(); - let mut light_dispatch = dummy(true); - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - light_dispatch.on_connect(&mut network_interface, peer0, Roles::LIGHT, 1000); - light_dispatch.on_connect(&mut network_interface, peer1.clone(), Roles::FULL, 2000); - light_dispatch.on_connect(&mut network_interface, peer2.clone(), Roles::AUTHORITY, 3000); - assert_eq!(vec![peer1.clone(), peer2.clone()], light_dispatch.idle_peers.iter().cloned().collect::>()); - assert_eq!(light_dispatch.best_blocks.get(&peer1), Some(&2000)); - assert_eq!(light_dispatch.best_blocks.get(&peer2), Some(&3000)); - } - - #[test] - fn disconnects_from_idle_peer() { - let peer0 = PeerId::random(); - - let mut network_interface = DummyNetwork::default(); - let mut light_dispatch = dummy(true); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 100); - assert_eq!(1, total_peers(&light_dispatch)); - assert!(!light_dispatch.best_blocks.is_empty()); - - light_dispatch.on_disconnect(&mut network_interface, &peer0); - assert_eq!(0, total_peers(&light_dispatch)); - assert!(light_dispatch.best_blocks.is_empty()); - } - - #[test] - fn disconnects_from_timeouted_peer() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - light_dispatch.on_connect(&mut network_interface, peer1.clone(), Roles::FULL, 1000); - assert_eq!(vec![peer0.clone(), peer1.clone()], light_dispatch.idle_peers.iter().cloned().collect::>()); - assert!(light_dispatch.active_peers.is_empty()); - - light_dispatch.add_request(&mut network_interface, RequestData::RemoteCall(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }, oneshot::channel().0)); 
- assert_eq!(vec![peer1.clone()], light_dispatch.idle_peers.iter().cloned().collect::>()); - assert_eq!(vec![peer0.clone()], light_dispatch.active_peers.keys().cloned().collect::>()); - - light_dispatch.active_peers[&peer0].timestamp = Instant::now() - REQUEST_TIMEOUT - REQUEST_TIMEOUT; - light_dispatch.maintain_peers(&mut network_interface); - assert!(light_dispatch.idle_peers.is_empty()); - assert_eq!(vec![peer1.clone()], light_dispatch.active_peers.keys().cloned().collect::>()); - assert_disconnected_peer(&network_interface); - } - - #[test] - fn disconnects_from_peer_on_response_with_wrong_id() { - let mut light_dispatch = dummy(true); - let peer0 = PeerId::random(); - let mut network_interface = DummyNetwork::default(); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - - light_dispatch.add_request(&mut network_interface, RequestData::RemoteCall(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }, oneshot::channel().0)); - receive_call_response(&mut network_interface, &mut light_dispatch, peer0, 1); - assert_disconnected_peer(&network_interface); - assert_eq!(light_dispatch.pending_requests.len(), 1); - } - - #[test] - fn disconnects_from_peer_on_incorrect_response() { - let mut light_dispatch = dummy(false); - let mut network_interface = DummyNetwork::default(); - let peer0 = PeerId::random(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteCall(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }, oneshot::channel().0)); - - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - receive_call_response(&mut network_interface, &mut light_dispatch, peer0.clone(), 0); - assert_disconnected_peer(&network_interface); - assert_eq!(light_dispatch.pending_requests.len(), 1); - } - - #[test] - fn disconnects_from_peer_on_unexpected_response() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer0 = PeerId::random(); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - - receive_call_response(&mut network_interface, &mut light_dispatch, peer0, 0); - assert_disconnected_peer(&network_interface); - } - - #[test] - fn disconnects_from_peer_on_wrong_response_type() { - let mut light_dispatch = dummy(false); - let peer0 = PeerId::random(); - let mut network_interface = DummyNetwork::default(); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - - light_dispatch.add_request(&mut network_interface, RequestData::RemoteCall(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }, oneshot::channel().0)); - - light_dispatch.on_remote_read_response(&mut network_interface, peer0.clone(), message::RemoteReadResponse { - id: 0, - proof: StorageProof::empty(), - }); - assert_disconnected_peer(&network_interface); - assert_eq!(light_dispatch.pending_requests.len(), 1); - } - - #[test] - fn receives_remote_failure_after_retry_count_failures() { - let retry_count = 2; - let peer_ids = (0 .. 
retry_count + 1).map(|_| PeerId::random()).collect::>(); - let mut light_dispatch = dummy(false); - let mut network_interface = DummyNetwork::default(); - for i in 0..retry_count+1 { - light_dispatch.on_connect(&mut network_interface, peer_ids[i].clone(), Roles::FULL, 1000); - } - - let (tx, mut response) = oneshot::channel(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteCall(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(retry_count) - }, tx)); - - for i in 0..retry_count { - assert!(response.try_recv().unwrap().is_none()); - receive_call_response(&mut network_interface, &mut light_dispatch, peer_ids[i].clone(), i as u64); - } - - assert!(response.try_recv().unwrap().unwrap().is_err()); - } - - #[test] - fn receives_remote_call_response() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer0 = PeerId::random(); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - - let (tx, response) = oneshot::channel(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteCall(RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }, tx)); - - receive_call_response(&mut network_interface, &mut light_dispatch, peer0.clone(), 0); - assert_eq!(futures::executor::block_on(response).unwrap().unwrap(), vec![42]); - } - - #[test] - fn receives_remote_read_response() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer0 = PeerId::random(); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - - let (tx, response) = oneshot::channel(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteRead(RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None, - }, tx)); - - light_dispatch.on_remote_read_response(&mut network_interface, peer0.clone(), message::RemoteReadResponse { - id: 0, - proof: StorageProof::empty(), - }); - assert_eq!( - futures::executor::block_on(response).unwrap().unwrap().remove(b":key".as_ref()).unwrap(), - Some(vec![42]) - ); - } - - #[test] - fn receives_remote_read_child_response() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer0 = PeerId::random(); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - - let (tx, response) = oneshot::channel(); - let child_info = ChildInfo::new_default(b"unique_id_1"); - let (child_info, child_type) = child_info.info(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteReadChild(RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), - child_info: child_info.to_vec(), - child_type, - keys: vec![b":key".to_vec()], - retry_count: None, - }, tx)); - - light_dispatch.on_remote_read_response(&mut network_interface, - peer0.clone(), message::RemoteReadResponse { - id: 0, - proof: StorageProof::empty(), - }); - assert_eq!(futures::executor::block_on(response).unwrap().unwrap().remove(b":key".as_ref()).unwrap(), Some(vec![42])); - } - - #[test] - fn receives_remote_header_response() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer0 = 
PeerId::random(); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - - let (tx, response) = oneshot::channel(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteHeader(RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }, tx)); - - light_dispatch.on_remote_header_response(&mut network_interface, peer0.clone(), message::RemoteHeaderResponse { - id: 0, - header: Some(Header { - parent_hash: Default::default(), - number: 1, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }), - proof: StorageProof::empty(), - }); - assert_eq!( - futures::executor::block_on(response).unwrap().unwrap().hash(), - "6443a0b46e0412e626363028115a9f2cf963eeed526b8b33e5316f08b50d0dc3".parse().unwrap(), - ); - } - - #[test] - fn receives_remote_changes_response() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer0 = PeerId::random(); - light_dispatch.on_connect(&mut network_interface, peer0.clone(), Roles::FULL, 1000); - - let (tx, response) = oneshot::channel(); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteChanges(RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), vec![]), - key: vec![], - storage_key: None, - retry_count: None, - }, tx)); - - light_dispatch.on_remote_changes_response(&mut network_interface, peer0.clone(), message::RemoteChangesResponse { - id: 0, - max: 1000, - proof: vec![vec![2]], - roots: vec![], - roots_proof: StorageProof::empty(), - }); - assert_eq!(futures::executor::block_on(response).unwrap().unwrap(), vec![(100, 2)]); - } - - #[test] - fn does_not_sends_request_to_peer_who_has_no_required_block() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - - light_dispatch.on_connect(&mut network_interface, peer1.clone(), Roles::FULL, 100); - - light_dispatch.add_request(&mut network_interface, RequestData::RemoteHeader(RemoteHeaderRequest { - cht_root: Default::default(), - block: 200, - retry_count: None, - }, oneshot::channel().0)); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteHeader(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }, oneshot::channel().0)); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteHeader(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }, oneshot::channel().0)); - - light_dispatch.on_connect(&mut network_interface, peer2.clone(), Roles::FULL, 150); - - assert_eq!(vec![peer1.clone(), peer2.clone()], light_dispatch.idle_peers.iter().cloned().collect::>()); - assert_eq!(light_dispatch.pending_requests.len(), 3); - - light_dispatch.update_best_number(&mut network_interface, peer1.clone(), 250); - - assert_eq!(vec![peer2.clone()], light_dispatch.idle_peers.iter().cloned().collect::>()); - assert_eq!(light_dispatch.pending_requests.len(), 2); - - light_dispatch.update_best_number(&mut network_interface, peer2.clone(), 250); - - assert!(!light_dispatch.idle_peers.iter().any(|_| true)); - 
assert_eq!(light_dispatch.pending_requests.len(), 1); - - light_dispatch.on_remote_header_response(&mut network_interface, peer1.clone(), message::RemoteHeaderResponse { - id: 0, - header: Some(dummy_header()), - proof: StorageProof::empty(), - }); - - assert!(!light_dispatch.idle_peers.iter().any(|_| true)); - assert_eq!(light_dispatch.pending_requests.len(), 0); - } - - #[test] - fn does_not_loop_forever_after_dispatching_request_to_last_peer() { - // this test is a regression for a bug where the dispatch function would - // loop forever after dispatching a request to the last peer, since the - // last peer was not updated - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - let peer3 = PeerId::random(); - - light_dispatch.add_request(&mut network_interface, RequestData::RemoteHeader(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }, oneshot::channel().0)); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteHeader(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }, oneshot::channel().0)); - - light_dispatch.on_connect(&mut network_interface, peer1.clone(), Roles::FULL, 200); - light_dispatch.on_connect(&mut network_interface, peer2.clone(), Roles::FULL, 200); - light_dispatch.on_connect(&mut network_interface, peer3.clone(), Roles::FULL, 250); - - assert_eq!(vec![peer1.clone(), peer2.clone()], light_dispatch.idle_peers.iter().cloned().collect::>()); - assert_eq!(light_dispatch.pending_requests.len(), 1); - } - - #[test] - fn tries_to_send_all_pending_requests() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer1 = PeerId::random(); - - light_dispatch.add_request(&mut network_interface, RequestData::RemoteHeader(RemoteHeaderRequest { - cht_root: Default::default(), - block: 300, - retry_count: None, - }, oneshot::channel().0)); - light_dispatch.add_request(&mut network_interface, RequestData::RemoteHeader(RemoteHeaderRequest { - cht_root: Default::default(), - block: 250, - retry_count: None, - }, oneshot::channel().0)); - - light_dispatch.on_connect(&mut network_interface, peer1.clone(), Roles::FULL, 250); - - assert!(light_dispatch.idle_peers.iter().cloned().collect::>().is_empty()); - assert_eq!(light_dispatch.pending_requests.len(), 1); - } - - #[test] - fn remote_body_with_one_block_body_should_succeed() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer1 = PeerId::random(); - - let header = dummy_header(); - light_dispatch.on_connect(&mut network_interface, peer1.clone(), Roles::FULL, 250); - - light_dispatch.add_request(&mut network_interface, RequestData::RemoteBody(RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }, oneshot::channel().0)); - - assert!(light_dispatch.pending_requests.is_empty()); - assert_eq!(light_dispatch.active_peers.len(), 1); - - let block = message::BlockData:: { - hash: sp_core::H256::random(), - header: None, - body: Some(Vec::new()), - message_queue: None, - receipt: None, - justification: None, - }; - - let response = message::generic::BlockResponse { - id: 0, - blocks: vec![block], - }; - - light_dispatch.on_remote_body_response(&mut network_interface, peer1.clone(), response); - - assert!(light_dispatch.active_peers.is_empty()); - assert_eq!(light_dispatch.idle_peers.len(), 1); - } - - #[test] - fn 
remote_body_with_three_bodies_should_fail() { - let mut light_dispatch = dummy(true); - let mut network_interface = DummyNetwork::default(); - let peer1 = PeerId::random(); - - let header = dummy_header(); - light_dispatch.on_connect(&mut network_interface, peer1.clone(), Roles::FULL, 250); - - light_dispatch.add_request(&mut network_interface, RequestData::RemoteBody(RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }, oneshot::channel().0)); - - assert!(light_dispatch.pending_requests.is_empty()); - assert_eq!(light_dispatch.active_peers.len(), 1); - - let response = { - let blocks: Vec<_> = (0..3).map(|_| message::BlockData:: { - hash: sp_core::H256::random(), - header: None, - body: Some(Vec::new()), - message_queue: None, - receipt: None, - justification: None, - }).collect(); - - message::generic::BlockResponse { - id: 0, - blocks, - } - }; - - light_dispatch.on_remote_body_response(&mut network_interface, peer1.clone(), response); - assert!(light_dispatch.active_peers.is_empty()); - assert!(light_dispatch.idle_peers.is_empty(), "peer should be disconnected after bad response"); - } -}
diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs
index a12c26da2e47eec14ae136ede05e6094d9e0c715..ae83b49e60f57e475bb3399ec6af3e9e672e6c8f 100644
--- a/client/network/src/protocol/message.rs
+++ b/client/network/src/protocol/message.rs
@@ -24,7 +24,7 @@ pub use self::generic::{ RemoteHeaderRequest, RemoteHeaderResponse, RemoteChangesRequest, RemoteChangesResponse, FinalityProofRequest, FinalityProofResponse, - FromBlock, RemoteReadChildRequest, + FromBlock, RemoteReadChildRequest, Roles, }; use sc_client_api::StorageProof;
@@ -137,14 +137,71 @@ pub struct RemoteReadResponse { /// Generic types. pub mod generic { + use bitflags::bitflags; use codec::{Encode, Decode, Input, Output}; use sp_runtime::Justification; - use crate::config::Roles; use super::{ RemoteReadResponse, Transactions, Direction, RequestId, BlockAttributes, RemoteCallResponse, ConsensusEngineId, BlockState, StorageProof, }; + + bitflags! { + /// Bitmask of the roles that a node fulfills. + pub struct Roles: u8 { + /// No network. + const NONE = 0b00000000; + /// Full node, does not participate in consensus. + const FULL = 0b00000001; + /// Light client node. + const LIGHT = 0b00000010; + /// Acts as an authority. + const AUTHORITY = 0b00000100; + } + } + + impl Roles { + /// Does this role represent a client that holds full chain data locally? + pub fn is_full(&self) -> bool { + self.intersects(Roles::FULL | Roles::AUTHORITY) + } + + /// Does this role represent a client that acts as an authority in the consensus? + pub fn is_authority(&self) -> bool { + *self == Roles::AUTHORITY + } + + /// Does this role represent a client that does not hold full chain data locally? + pub fn is_light(&self) -> bool { + !self.is_full() + } + } + + impl<'a> From<&'a crate::config::Role> for Roles { + fn from(roles: &'a crate::config::Role) -> Self { + match roles { + crate::config::Role::Full => Roles::FULL, + crate::config::Role::Light => Roles::LIGHT, + crate::config::Role::Sentry { .. } => Roles::AUTHORITY, + crate::config::Role::Authority { ..
} => Roles::AUTHORITY, + } + } + } + + impl codec::Encode for Roles { + fn encode_to(&self, dest: &mut T) { + dest.push_byte(self.bits()) + } + } + + impl codec::EncodeLike for Roles {} + + impl codec::Decode for Roles { + fn decode(input: &mut I) -> Result { + Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) + } + } + /// Consensus is mostly opaque to us #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct ConsensusMessage { diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index ffbe6a096ae69e9616e357d6342f0544790478f0..9feded784fe78871646879ba49e3b19a866a6d7c 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -34,9 +34,9 @@ use sp_consensus::{BlockOrigin, BlockStatus, import_queue::{IncomingBlock, BlockImportResult, BlockImportError} }; use crate::{ - config::{Roles, BoxFinalityProofRequestBuilder}, + config::BoxFinalityProofRequestBuilder, protocol::message::{self, generic::FinalityProofRequest, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, - FinalityProofResponse}, + FinalityProofResponse, Roles}, }; use either::Either; use extra_requests::ExtraRequests; @@ -85,7 +85,7 @@ mod rep { pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header"); /// Reputation change for peers which send us a block which we fail to verify. - pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 20), "Block verification failed"); + pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed"); /// Reputation change for peers which send us a known bad block. pub const BAD_BLOCK: Rep = Rep::new(-(1 << 29), "Bad block"); @@ -138,6 +138,8 @@ pub struct ChainSync { block_announce_validator: Box + Send>, /// Maximum number of peers to ask the same blocks in parallel. max_parallel_downloads: u32, + /// Total number of processed blocks (imported or failed). + processed_blocks: usize, } /// All the data we have about a Peer that we are trying to sync with @@ -318,6 +320,7 @@ impl ChainSync { is_idle: false, block_announce_validator, max_parallel_downloads, + processed_blocks: 0, } } @@ -357,6 +360,11 @@ impl ChainSync { self.fork_targets.len() } + /// Number of processed blocks. + pub fn num_processed_blocks(&self) -> usize { + self.processed_blocks + } + /// Handle a new connected peer. /// /// Call this method whenever we connect to a new peer. 
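The `Roles` bitmask added to `protocol/message.rs` above travels over the wire as a single byte: the hand-written `Encode` impl pushes `self.bits()` via `push_byte`, and the `Decode` impl goes through `from_bits`, so a byte with unknown bits set fails with "Invalid bytes" instead of being silently truncated. A standalone sketch of that behaviour (assuming the `bitflags` 1.x crate that the new module imports; `NodeRoles` is a local stand-in, not the crate's own type):

use bitflags::bitflags;

bitflags! {
    /// Local stand-in mirroring the bit layout of the protocol's `Roles` mask.
    struct NodeRoles: u8 {
        const NONE = 0b0000_0000;
        const FULL = 0b0000_0001;
        const LIGHT = 0b0000_0010;
        const AUTHORITY = 0b0000_0100;
    }
}

fn main() {
    // Combining roles still fits in one byte; this is the value the custom
    // `Encode` impl would push with `push_byte`.
    let roles = NodeRoles::FULL | NodeRoles::AUTHORITY;
    assert_eq!(roles.bits(), 0b0000_0101);

    // `is_full()` above is just an intersection test against FULL | AUTHORITY.
    assert!(roles.intersects(NodeRoles::FULL | NodeRoles::AUTHORITY));

    // Decoding mirrors `from_bits`: a byte containing unknown bits is rejected,
    // which is why the `Decode` impl returns an "Invalid bytes" error for it.
    assert_eq!(NodeRoles::from_bits(0b0000_0101), Some(roles));
    assert!(NodeRoles::from_bits(0b1000_0000).is_none());
}

Rejecting unknown bits at decode time lets a node treat a malformed roles byte in the handshake as a protocol error rather than guessing at the peer's capabilities.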
@@ -370,12 +378,12 @@ impl ChainSync { Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) } Ok(BlockStatus::KnownBad) => { - info!("New peer with known bad best block {} ({}).", best_hash, best_number); + info!("💔 New peer with known bad best block {} ({}).", best_hash, best_number); Err(BadPeer(who, rep::BAD_BLOCK)) } Ok(BlockStatus::Unknown) => { if best_number.is_zero() { - info!("New peer with unknown genesis hash {} ({}).", best_hash, best_number); + info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); return Err(BadPeer(who, rep::GENESIS_MISMATCH)); } // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have @@ -649,7 +657,7 @@ impl ChainSync { pub fn on_block_data (&mut self, who: PeerId, request: Option>, response: BlockResponse) -> Result, BadPeer> { - let new_blocks: Vec> = + let mut new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(&who) { let mut blocks = response.blocks; if request.as_ref().map_or(false, |r| r.direction == message::Direction::Descending) { @@ -703,7 +711,7 @@ impl ChainSync { return Err(BadPeer(who, rep::UNKNOWN_ANCESTOR)) }, (_, Err(e)) => { - info!("Error answering legitimate blockchain query: {:?}", e); + info!("❌ Error answering legitimate blockchain query: {:?}", e); return Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) } }; @@ -768,6 +776,12 @@ impl ChainSync { Vec::new() }; + let orig_len = new_blocks.len(); + new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); + if new_blocks.len() != orig_len { + debug!(target: "sync", "Ignoring {} blocks that are already queued", orig_len - new_blocks.len()); + } + let is_recent = new_blocks.first() .map(|block| { self.peers.iter().any(|(_, peer)| peer.recently_announced.contains(&block.hash)) @@ -804,7 +818,7 @@ impl ChainSync { if let Some(peer) = self.peers.get_mut(&who) { peer } else { - error!(target: "sync", "Called on_block_justification with a bad peer ID"); + error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); return Ok(OnBlockJustification::Nothing) }; @@ -817,7 +831,7 @@ impl ChainSync { if hash != block.hash { info!( target: "sync", - "Invalid block justification provided by {}: requested: {:?} got: {:?}", who, hash, block.hash + "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", who, hash, block.hash ); return Err(BadPeer(who, rep::BAD_JUSTIFICATION)); } @@ -851,7 +865,7 @@ impl ChainSync { if let Some(peer) = self.peers.get_mut(&who) { peer } else { - error!(target: "sync", "Called on_block_finality_proof_data with a bad peer ID"); + error!(target: "sync", "💔 Called on_block_finality_proof_data with a bad peer ID"); return Ok(OnBlockFinalityProof::Nothing) }; @@ -863,7 +877,7 @@ impl ChainSync { if hash != resp.block { info!( target: "sync", - "Invalid block finality proof provided: requested: {:?} got: {:?}", + "💔 Invalid block finality proof provided: requested: {:?} got: {:?}", hash, resp.block ); @@ -895,10 +909,12 @@ impl ChainSync { let mut output = Vec::new(); let mut has_error = false; - let mut hashes = vec![]; - for (result, hash) in results { - hashes.push(hash); + for (_, hash) in &results { + self.queue_blocks.remove(&hash); + } + self.processed_blocks += results.len(); + for (result, hash) in results { if has_error { continue; } @@ -927,7 +943,7 @@ impl ChainSync { if aux.bad_justification { if let Some(peer) = who { - info!("Sent block with bad justification to import"); + info!("💔 Sent block with bad justification to import"); output.push(Err(BadPeer(peer, 
rep::BAD_JUSTIFICATION))); } } @@ -943,43 +959,39 @@ impl ChainSync { }, Err(BlockImportError::IncompleteHeader(who)) => { if let Some(peer) = who { - info!("Peer sent block with incomplete header to import"); + warn!("💔 Peer sent block with incomplete header to import"); output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); output.extend(self.restart()); } }, Err(BlockImportError::VerificationFailed(who, e)) => { if let Some(peer) = who { - info!("Verification failed from peer: {}", e); + warn!("💔 Verification failed for block {:?} received from peer: {}, {:?}", hash, peer, e); output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); output.extend(self.restart()); } }, Err(BlockImportError::BadBlock(who)) => { if let Some(peer) = who { - info!("Block received from peer has been blacklisted"); + info!("💔 Block {:?} received from peer {} has been blacklisted", hash, peer); output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); - output.extend(self.restart()); } }, Err(BlockImportError::MissingState) => { // This may happen if the chain we were requesting upon has been discarded // in the meantime because other chain has been finalized. // Don't mark it as bad as it still may be synced if explicitly requested. - trace!(target: "sync", "Obsolete block"); + trace!(target: "sync", "Obsolete block {:?}", hash); }, - Err(BlockImportError::UnknownParent) | - Err(BlockImportError::Cancelled) | - Err(BlockImportError::Other(_)) => { + e @ Err(BlockImportError::UnknownParent) | + e @ Err(BlockImportError::Other(_)) => { + warn!(target: "sync", "💔 Error importing block {:?}: {:?}", hash, e); output.extend(self.restart()); }, + Err(BlockImportError::Cancelled) => {} }; } - for hash in hashes { - self.queue_blocks.remove(&hash); - } - self.is_idle = false; output.into_iter() } @@ -1005,7 +1017,7 @@ impl ChainSync { }); if let Err(err) = r { - warn!(target: "sync", "Error cleaning up pending extra finality proof data requests: {:?}", err) + warn!(target: "sync", "💔 Error cleaning up pending extra finality proof data requests: {:?}", err) } let client = &self.client; @@ -1014,7 +1026,7 @@ impl ChainSync { }); if let Err(err) = r { - warn!(target: "sync", "Error cleaning up pending extra justification data requests: {:?}", err); + warn!(target: "sync", "💔 Error cleaning up pending extra justification data requests: {:?}", err); } } @@ -1068,7 +1080,7 @@ impl ChainSync { let number = *header.number(); debug!(target: "sync", "Received block announcement {:?} with number {:?} from {}", hash, number, who); if number.is_zero() { - warn!(target: "sync", "Ignored genesis block (#0) announcement from {}: {}", who, hash); + warn!(target: "sync", "💔 Ignored genesis block (#0) announcement from {}: {}", who, hash); return OnBlockAnnounce::Nothing } let parent_status = self.block_status(header.parent_hash()).ok().unwrap_or(BlockStatus::Unknown); @@ -1079,7 +1091,7 @@ impl ChainSync { let peer = if let Some(peer) = self.peers.get_mut(&who) { peer } else { - error!(target: "sync", "Called on_block_announce with a bad peer ID"); + error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); return OnBlockAnnounce::Nothing }; while peer.recently_announced.len() >= ANNOUNCE_HISTORY_SIZE { @@ -1094,12 +1106,14 @@ impl ChainSync { if let PeerSyncState::AncestorSearch(_, _) = peer.state { return OnBlockAnnounce::Nothing } - // If the announced block is the best they have seen, our common number + // If the announced block is the best they have and is not ahead of us, our common number // is either one further ahead 
or it's the one they just announced, if we know about it. if is_best { - if known { + if known && self.best_queued_number >= number { peer.common_number = number - } else if header.parent_hash() == &self.best_queued_hash || known_parent { + } else if header.parent_hash() == &self.best_queued_hash + || known_parent && self.best_queued_number >= number + { peer.common_number = number - One::one(); } }
@@ -1123,7 +1137,7 @@ return OnBlockAnnounce::Nothing } Err(e) => { - error!(target: "sync", "Block announcement validation errored: {}", e); + error!(target: "sync", "💔 Block announcement validation errored: {}", e); return OnBlockAnnounce::Nothing } }
@@ -1168,7 +1182,7 @@ /// Restart the sync process. fn restart<'a>(&'a mut self) -> impl Iterator), BadPeer>> + 'a { - self.queue_blocks.clear(); + self.processed_blocks = 0; self.blocks.clear(); let info = self.client.info(); self.best_queued_hash = info.best_hash;
@@ -1308,13 +1322,17 @@ fn peer_block_request( finalized: NumberFor, best_num: NumberFor, ) -> Option<(Range>, BlockRequest)> { - if peer.common_number < finalized { - return None; - } if best_num >= peer.best_number { // Will be downloaded as alternative fork instead. return None; } + if peer.common_number < finalized { + trace!( + target: "sync", + "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", + id, peer.common_number, finalized, peer.best_number, best_num, + ); + } if let Some(range) = blocks.needed_blocks( id.clone(), MAX_BLOCKS_TO_REQUEST,
diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs
index 31b798ace28a54b207df5d43e625fb2d0c9170f2..359287701e66e5b95b9561bbd5982fd50e77e08d 100644
--- a/client/network/src/protocol/sync/blocks.rs
+++ b/client/network/src/protocol/sync/blocks.rs
@@ -212,7 +212,7 @@ impl BlockCollection { #[cfg(test)] mod test { use super::{BlockCollection, BlockData, BlockRangeState}; - use crate::{message, PeerId}; + use crate::{protocol::message, PeerId}; use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; use sp_core::H256;
diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs
index 81b12a1a704aab9b17737c4a03190bef519ff3c5..3d854b574b01fc26aaf6d1d39e15bed2b46551c2 100644
--- a/client/network/src/protocol/sync/extra_requests.rs
+++ b/client/network/src/protocol/sync/extra_requests.rs
@@ -221,7 +221,7 @@ impl ExtraRequests { }; if self.tree.finalize_root(&finalized_hash).is_none() { - warn!(target: "sync", "Imported {:?} {:?} which isn't a root in the tree: {:?}", + warn!(target: "sync", "‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}", finalized_hash, finalized_number, self.tree.roots().collect::>()
diff --git a/client/network/src/service.rs b/client/network/src/service.rs
index d4176a489fcff18ac9511d7e1438680978ef0e25..cf142afe9dd08a530ccb1d3898cb85b04a78a6b1 100644
--- a/client/network/src/service.rs
+++ b/client/network/src/service.rs
@@ -25,37 +25,56 @@ //! The methods of the [`NetworkService`] are implemented by sending a message over a channel, //! which is then processed by [`NetworkWorker::poll`].
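The module docs above spell out the architecture that the rewritten imports below support: public `NetworkService` methods only push a command onto a channel, and the background `NetworkWorker` drains that channel from its `poll` loop. A rough, self-contained sketch of the pattern under those assumptions (hypothetical `ServiceHandle`/`Worker` names, and plain `futures` channels rather than the `tracing_unbounded` wrapper this patch switches to):

use futures::{channel::mpsc, StreamExt};

/// Commands the public handle can send to the background worker.
enum ServiceToWorkerMsg {
    AnnounceBlock(u64),
    DisconnectPeer(String),
}

#[derive(Clone)]
struct ServiceHandle {
    to_worker: mpsc::UnboundedSender<ServiceToWorkerMsg>,
}

impl ServiceHandle {
    /// Fire-and-forget: if the worker is gone, the command is silently dropped.
    fn announce_block(&self, number: u64) {
        let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(number));
    }

    fn disconnect_peer(&self, peer: &str) {
        let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(peer.to_owned()));
    }
}

/// The worker owns the receiving side and processes commands in its event loop,
/// much as `NetworkWorker::poll` does further down in this file.
struct Worker {
    from_service: mpsc::UnboundedReceiver<ServiceToWorkerMsg>,
}

impl Worker {
    async fn run(mut self) {
        while let Some(msg) = self.from_service.next().await {
            match msg {
                ServiceToWorkerMsg::AnnounceBlock(n) => println!("announce block #{}", n),
                ServiceToWorkerMsg::DisconnectPeer(p) => println!("disconnect {}", p),
            }
        }
    }
}

fn main() {
    let (to_worker, from_service) = mpsc::unbounded();
    let service = ServiceHandle { to_worker };
    let worker = Worker { from_service };

    service.announce_block(1);
    service.disconnect_peer("peer-a");
    drop(service); // dropping the last sender lets the worker loop terminate

    futures::executor::block_on(worker.run());
}

The appeal of this split is that the handle stays cheap to clone and never blocks; all state that needs `&mut` access lives in the worker, which is also why dropping the last handle is enough to let the worker shut down cleanly.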
-use std::{borrow::Cow, collections::{HashMap, HashSet}, fs, marker::PhantomData, io, path::Path, str}; -use std::sync::{Arc, atomic::{AtomicBool, AtomicUsize, Ordering}}; -use std::pin::Pin; -use std::task::Poll; - -use sp_consensus::import_queue::{ImportQueue, Link}; -use sp_consensus::import_queue::{BlockImportResult, BlockImportError}; -use futures::{prelude::*, channel::mpsc}; -use log::{warn, error, info, trace}; -use libp2p::{PeerId, Multiaddr, kad::record}; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}; +use crate::{ + behaviour::{Behaviour, BehaviourOut}, + config::{parse_addr, parse_str_addr, NonReservedPeerMode, Params, ProtocolId, Role, TransportConfig}, + error::Error, + network_state::{ + NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, + }, + on_demand_layer::AlwaysBadChecker, + protocol::{self, event::Event, light_client_handler, LegacyConnectionKillError, sync::SyncState, PeerInfo, Protocol}, + transport, ReputationChange, +}; +use futures::prelude::*; +use libp2p::{PeerId, Multiaddr}; +use libp2p::core::{ConnectedPoint, Executor, connection::{ConnectionError, PendingConnectionError}, either::EitherError}; +use libp2p::kad::record; +use libp2p::ping::handler::PingFailure; +use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent, protocols_handler::NodeHandlerWrapperError}; +use log::{error, info, trace, warn}; use parking_lot::Mutex; +use prometheus_endpoint::{ + register, Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, +}; use sc_peerset::PeersetHandle; -use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId}; -use prometheus_endpoint::{Registry, Counter, CounterVec, Gauge, GaugeVec, Opts, U64, register, PrometheusError}; - -use crate::{behaviour::{Behaviour, BehaviourOut}, config::{parse_str_addr, parse_addr}}; -use crate::{transport, config::NonReservedPeerMode, ReputationChange}; -use crate::config::{Params, ProtocolId, TransportConfig}; -use crate::error::Error; -use crate::network_state::{NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer}; -use crate::protocol::{self, Protocol, PeerInfo}; -use crate::protocol::{event::Event, light_dispatch::{AlwaysBadChecker, RequestData}}; -use crate::protocol::sync::SyncState; +use sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; +use sp_runtime::{ + traits::{Block as BlockT, NumberFor}, + ConsensusEngineId, +}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, + fs, io, + marker::PhantomData, + pin::Pin, + str, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, + task::Poll, +}; +#[cfg(test)] +mod tests; /// Minimum Requirements for a Hash within Networking pub trait ExHashT: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} -impl ExHashT for T where - T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static +impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + Send + Sync + 'static {} /// Transaction pool interface @@ -148,7 +167,7 @@ pub struct NetworkService { /// nodes it should be connected to or not. peerset: PeersetHandle, /// Channel that sends messages to the actual worker. - to_worker: mpsc::UnboundedSender>, + to_worker: TracingUnboundedSender>, /// Marker to pin the `H` generic. 
Serves no purpose except to not break backwards /// compatibility. _marker: PhantomData, @@ -161,28 +180,24 @@ impl NetworkWorker { /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. pub fn new(params: Params) -> Result, Error> { - let (to_worker, from_worker) = mpsc::unbounded(); + let (to_worker, from_worker) = tracing_unbounded("mpsc_network_worker"); - if let Some(ref path) = params.network_config.net_config_path { - fs::create_dir_all(Path::new(path))?; - } + fs::create_dir_all(¶ms.network_config.net_config_path)?; // List of multiaddresses that we know in the network. let mut known_addresses = Vec::new(); let mut bootnodes = Vec::new(); - let mut reserved_nodes = Vec::new(); + let mut boot_node_ids = HashSet::new(); // Process the bootnodes. for bootnode in params.network_config.boot_nodes.iter() { - match parse_str_addr(bootnode) { - Ok((peer_id, addr)) => { - bootnodes.push(peer_id.clone()); - known_addresses.push((peer_id, addr)); - }, - Err(_) => warn!(target: "sub-libp2p", "Not a valid bootnode address: {}", bootnode), - } + bootnodes.push(bootnode.peer_id.clone()); + boot_node_ids.insert(bootnode.peer_id.clone()); + known_addresses.push((bootnode.peer_id.clone(), bootnode.multiaddr.clone())); } + let boot_node_ids = Arc::new(boot_node_ids); + // Check for duplicate bootnodes. known_addresses.iter() .try_for_each(|(peer_id, addr)| @@ -200,29 +215,56 @@ impl NetworkWorker { } )?; - // Initialize the reserved peers. - for reserved in params.network_config.reserved_nodes.iter() { - if let Ok((peer_id, addr)) = parse_str_addr(reserved) { - reserved_nodes.push(peer_id.clone()); - known_addresses.push((peer_id, addr)); - } else { - warn!(target: "sub-libp2p", "Not a valid reserved node address: {}", reserved); + // Initialize the peers we should always be connected to. + let priority_groups = { + let mut reserved_nodes = HashSet::new(); + for reserved in params.network_config.reserved_nodes.iter() { + reserved_nodes.insert(reserved.peer_id.clone()); + known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); } - } + + let mut sentries_and_validators = HashSet::new(); + match ¶ms.role { + Role::Sentry { validators } => { + for validator in validators { + sentries_and_validators.insert(validator.peer_id.clone()); + known_addresses.push((validator.peer_id.clone(), validator.multiaddr.clone())); + } + } + Role::Authority { sentry_nodes } => { + for sentry_node in sentry_nodes { + sentries_and_validators.insert(sentry_node.peer_id.clone()); + known_addresses.push((sentry_node.peer_id.clone(), sentry_node.multiaddr.clone())); + } + } + _ => {} + } + + vec![ + ("reserved".to_owned(), reserved_nodes), + ("sentries_and_validators".to_owned(), sentries_and_validators), + ] + }; let peerset_config = sc_peerset::PeersetConfig { in_peers: params.network_config.in_peers, out_peers: params.network_config.out_peers, bootnodes, reserved_only: params.network_config.non_reserved_mode == NonReservedPeerMode::Deny, - reserved_nodes, + priority_groups, }; // Private and public keys configuration. 
let local_identity = params.network_config.node_key.clone().into_keypair()?; let local_public = local_identity.public(); let local_peer_id = local_public.clone().into_peer_id(); - info!(target: "sub-libp2p", "Local node identity is: {}", local_peer_id.to_base58()); + info!(target: "sub-libp2p", "🏷 Local node identity is: {}", local_peer_id.to_base58()); + + // Initialize the metrics. + let metrics = match ¶ms.metrics_registry { + Some(registry) => Some(Metrics::register(®istry)?), + None => None + }; let checker = params.on_demand.as_ref() .map(|od| od.checker().clone()) @@ -232,22 +274,23 @@ impl NetworkWorker { let is_major_syncing = Arc::new(AtomicBool::new(false)); let (protocol, peerset_handle) = Protocol::new( protocol::ProtocolConfig { - roles: params.roles, + roles: From::from(¶ms.role), max_parallel_downloads: params.network_config.max_parallel_downloads, }, params.chain.clone(), - checker.clone(), params.transaction_pool, params.finality_proof_provider.clone(), params.finality_proof_request_builder, params.protocol_id.clone(), peerset_config, params.block_announce_validator, - params.metrics_registry.as_ref() + params.metrics_registry.as_ref(), + boot_node_ids.clone(), + metrics.as_ref().map(|m| m.notifications_queues_size.clone()), )?; // Build the swarm. - let (mut swarm, bandwidth): (Swarm::, _) = { + let (mut swarm, bandwidth): (Swarm, _) = { let user_agent = format!( "{} ({})", params.network_config.client_version, @@ -259,10 +302,16 @@ impl NetworkWorker { }; let light_client_handler = { let config = protocol::light_client_handler::Config::new(¶ms.protocol_id); - protocol::LightClientHandler::new(config, params.chain, checker, peerset_handle.clone()) + protocol::LightClientHandler::new( + config, + params.chain, + checker, + peerset_handle.clone(), + ) }; let mut behaviour = futures::executor::block_on(Behaviour::new( protocol, + params.role, user_agent, local_public, known_addresses, @@ -281,6 +330,9 @@ impl NetworkWorker { // Temporary hack to stay backwards compatible until we migrated to one DHT per protocol. 
behaviour.add_discovery(ProtocolId::from(libp2p::kad::protocol::DEFAULT_PROTO_NAME)); behaviour.add_discovery(params.protocol_id.clone()); + for (engine_id, protocol_name) in ¶ms.network_config.notifications_protocols { + behaviour.register_notifications_protocol(*engine_id, protocol_name.clone()); + } let (transport, bandwidth) = { let (config_mem, config_wasm, flowctrl) = match params.network_config.transport { TransportConfig::MemoryOnly => (true, None, false), @@ -289,9 +341,16 @@ impl NetworkWorker { }; transport::build_transport(local_identity, config_mem, config_wasm, flowctrl) }; - let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()); + let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) + .peer_connection_limit(crate::MAX_CONNECTIONS_PER_PEER); if let Some(spawner) = params.executor { - builder = builder.executor_fn(spawner); + struct SpawnImpl(F); + impl + Send>>)> Executor for SpawnImpl { + fn exec(&self, f: Pin + Send>>) { + (self.0)(f) + } + } + builder = builder.executor(Box::new(SpawnImpl(spawner))); } (builder.build(), bandwidth) }; @@ -331,10 +390,8 @@ impl NetworkWorker { from_worker, light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), event_streams: Vec::new(), - metrics: match params.metrics_registry { - Some(registry) => Some(Metrics::register(®istry)?), - None => None - } + metrics, + boot_node_ids, }) } @@ -378,6 +435,11 @@ impl NetworkWorker { self.network_service.user_protocol().num_queued_blocks() } + /// Returns the number of processed blocks. + pub fn num_processed_blocks(&self) -> usize { + self.network_service.user_protocol().num_processed_blocks() + } + /// Number of active sync requests. pub fn num_sync_requests(&self) -> usize { self.network_service.user_protocol().num_sync_requests() @@ -395,8 +457,8 @@ impl NetworkWorker { } /// You must call this when a new block is imported by the client. - pub fn on_block_imported(&mut self, header: B::Header, data: Vec, is_best: bool) { - self.network_service.user_protocol_mut().on_block_imported(&header, data, is_best); + pub fn on_block_imported(&mut self, header: B::Header, is_best: bool) { + self.network_service.user_protocol_mut().on_block_imported(&header, is_best); } /// You must call this when a new block is finalized by the client. @@ -486,6 +548,11 @@ impl NetworkWorker { } impl NetworkService { + /// Returns the local `PeerId`. + pub fn local_peer_id(&self) -> &PeerId { + &self.local_peer_id + } + /// Writes a message on an open notifications channel. Has no effect if the notifications /// channel with this protocol name is closed. /// @@ -512,7 +579,7 @@ impl NetworkService { /// The stream never ends (unless the `NetworkWorker` gets shut down). pub fn event_stream(&self) -> impl Stream { // Note: when transitioning to stable futures, remove the `Error` entirely - let (tx, rx) = mpsc::unbounded(); + let (tx, rx) = tracing_unbounded("mpsc_network_event_stream"); let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); rx } @@ -732,7 +799,7 @@ enum ServiceToWorkerMsg { PutValue(record::Key, Vec), AddKnownAddress(PeerId, Multiaddr), SyncFork(Vec, B::Hash, NumberFor), - EventStream(mpsc::UnboundedSender), + EventStream(TracingUnboundedSender), WriteNotification { message: Vec, engine_id: ConsensusEngineId, @@ -763,30 +830,41 @@ pub struct NetworkWorker { /// The import queue that was passed as initialization. import_queue: Box>, /// Messages from the `NetworkService` and that must be processed. 
- from_worker: mpsc::UnboundedReceiver>, + from_worker: TracingUnboundedReceiver>, /// Receiver for queries from the light client that must be processed. - light_client_rqs: Option>>, + light_client_rqs: Option>>, /// Senders for events that happen on the network. - event_streams: Vec>, + event_streams: Vec>, /// Prometheus network metrics. metrics: Option, + /// The `PeerId`'s of all boot nodes. + boot_node_ids: Arc>, } struct Metrics { // This list is ordered alphabetically - connections: Gauge, + connections: GaugeVec, + connections_closed_total: CounterVec, import_queue_blocks_submitted: Counter, import_queue_finality_proofs_submitted: Counter, import_queue_justifications_submitted: Counter, + incoming_connections_errors_total: Counter, + incoming_connections_total: Counter, is_major_syncing: Gauge, + issued_light_requests: Counter, kbuckets_num_nodes: Gauge, + listeners_local_addresses: Gauge, + listeners_errors_total: Counter, network_per_sec_bytes: GaugeVec, - notifications_total: CounterVec, + notifications_queues_size: HistogramVec, + notifications_sizes: HistogramVec, num_event_stream_channels: Gauge, opened_notification_streams: GaugeVec, peers_count: Gauge, peerset_num_discovered: Gauge, peerset_num_requested: Gauge, + pending_connections: Gauge, + pending_connections_errors_total: CounterVec, random_kademalia_queries_total: Counter, } @@ -794,8 +872,19 @@ impl Metrics { fn register(registry: &Registry) -> Result { Ok(Self { // This list is ordered alphabetically - connections: register(Gauge::new( - "sub_libp2p_connections", "Number of libp2p connections" + connections: register(GaugeVec::new( + Opts::new( + "sub_libp2p_connections", + "Number of established libp2p connections" + ), + &["direction"] + )?, registry)?, + connections_closed_total: register(CounterVec::new( + Opts::new( + "sub_libp2p_connections_closed_total", + "Total number of connections closed, by reason" + ), + &["reason"] )?, registry)?, import_queue_blocks_submitted: register(Counter::new( "import_queue_blocks_submitted", @@ -809,12 +898,31 @@ impl Metrics { "import_queue_justifications_submitted", "Number of justifications submitted to the import queue.", )?, registry)?, + incoming_connections_errors_total: register(Counter::new( + "sub_libp2p_incoming_connections_handshake_errors_total", + "Total number of incoming connections that have failed during the initial handshake" + )?, registry)?, + incoming_connections_total: register(Counter::new( + "sub_libp2p_incoming_connections_total", + "Total number of incoming connections on the listening sockets" + )?, registry)?, is_major_syncing: register(Gauge::new( "sub_libp2p_is_major_syncing", "Whether the node is performing a major sync or not.", )?, registry)?, + issued_light_requests: register(Counter::new( + "issued_light_requests", + "Number of light client requests that our node has issued.", + )?, registry)?, kbuckets_num_nodes: register(Gauge::new( "sub_libp2p_kbuckets_num_nodes", "Number of nodes in the Kademlia k-buckets" )?, registry)?, + listeners_local_addresses: register(Gauge::new( + "sub_libp2p_listeners_local_addresses", "Number of local addresses we're listening on" + )?, registry)?, + listeners_errors_total: register(Counter::new( + "sub_libp2p_listeners_errors_total", + "Total number of non-fatal errors reported by a listener" + )?, registry)?, network_per_sec_bytes: register(GaugeVec::new( Opts::new( "sub_libp2p_network_per_sec_bytes", @@ -822,11 +930,25 @@ impl Metrics { ), &["direction"] )?, registry)?, - notifications_total: 
register(CounterVec::new( - Opts::new( - "sub_libp2p_notifications_total", - "Number of notification received from all nodes" - ), + notifications_queues_size: register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_notifications_queues_size", + "Total size of all the notification queues" + ), + buckets: vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0], + }, + &["protocol"] + )?, registry)?, + notifications_sizes: register(HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_notifications_sizes", + "Sizes of the notifications send to and received from all nodes" + ), + buckets: prometheus_endpoint::exponential_buckets(64.0, 4.0, 8) + .expect("parameters are always valid values; qed"), + }, &["direction", "protocol"] )?, registry)?, num_event_stream_channels: register(Gauge::new( @@ -849,6 +971,17 @@ impl Metrics { peerset_num_requested: register(Gauge::new( "sub_libp2p_peerset_num_requested", "Number of nodes that the peerset manager wants us to be connected to", )?, registry)?, + pending_connections: register(Gauge::new( + "sub_libp2p_pending_connections", + "Number of connections in the process of being established", + )?, registry)?, + pending_connections_errors_total: register(CounterVec::new( + Opts::new( + "sub_libp2p_pending_connections_errors_total", + "Total number of pending connection errors" + ), + &["reason"] + )?, registry)?, random_kademalia_queries_total: register(Counter::new( "sub_libp2p_random_kademalia_queries_total", "Number of random Kademlia queries started", )?, registry)?, @@ -864,8 +997,10 @@ impl Metrics { self.opened_notification_streams.with_label_values(&[&engine_id_to_string(&engine_id)]).dec(); }, Event::NotificationsReceived { messages, .. } => { - for (engine_id, _) in messages { - self.notifications_total.with_label_values(&["in", &engine_id_to_string(&engine_id)]).inc(); + for (engine_id, message) in messages { + self.notifications_sizes + .with_label_values(&["in", &engine_id_to_string(&engine_id)]) + .observe(message.len() as f64); } }, _ => {} @@ -887,7 +1022,13 @@ impl Future for NetworkWorker { // Check for new incoming light client requests. if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { while let Poll::Ready(Some(rq)) = light_client_rqs.poll_next_unpin(cx) { - this.network_service.user_protocol_mut().add_light_client_request(rq); + // This can error if there are too many queued requests already. 
+ if this.network_service.light_client_request(rq).is_err() { + log::warn!("Couldn't start light client request: too many pending requests"); + } + if let Some(metrics) = this.metrics.as_ref() { + metrics.issued_light_requests.inc(); + } } } @@ -920,16 +1061,15 @@ impl Future for NetworkWorker { this.event_streams.push(sender), ServiceToWorkerMsg::WriteNotification { message, engine_id, target } => { if let Some(metrics) = this.metrics.as_ref() { - metrics.notifications_total.with_label_values(&["out", &engine_id_to_string(&engine_id)]).inc(); + metrics.notifications_sizes + .with_label_values(&["out", &engine_id_to_string(&engine_id)]) + .observe(message.len() as f64); } this.network_service.user_protocol_mut().write_notification(target, engine_id, message) }, ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, protocol_name } => { - let events = this.network_service.user_protocol_mut() + this.network_service .register_notifications_protocol(engine_id, protocol_name); - for event in events { - this.event_streams.retain(|sender| sender.unbounded_send(event.clone()).is_ok()); - } }, ServiceToWorkerMsg::DisconnectPeer(who) => this.network_service.user_protocol_mut().disconnect_peer(&who), @@ -973,26 +1113,121 @@ impl Future for NetworkWorker { metrics.update_with_network_event(&ev); } }, - Poll::Ready(SwarmEvent::Connected(peer_id)) => { + Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. }) => { trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); if let Some(metrics) = this.metrics.as_ref() { - metrics.connections.inc(); + match endpoint { + ConnectedPoint::Dialer { .. } => + metrics.connections.with_label_values(&["out"]).inc(), + ConnectedPoint::Listener { .. } => + metrics.connections.with_label_values(&["in"]).inc(), + } + } + }, + Poll::Ready(SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, .. }) => { + trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); + if let Some(metrics) = this.metrics.as_ref() { + match endpoint { + ConnectedPoint::Dialer { .. } => + metrics.connections.with_label_values(&["out"]).dec(), + ConnectedPoint::Listener { .. 
} => + metrics.connections.with_label_values(&["in"]).dec(), + } + match cause { + ConnectionError::IO(_) => + metrics.connections_closed_total.with_label_values(&["transport-error"]).inc(), + ConnectionError::ConnectionLimit(_) => + metrics.connections_closed_total.with_label_values(&["limit-reached"]).inc(), + ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(EitherError::B(EitherError::A(PingFailure::Timeout))))))) => + metrics.connections_closed_total.with_label_values(&["ping-timeout"]).inc(), + ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( + EitherError::A(EitherError::A(EitherError::B(LegacyConnectionKillError))))))) => + metrics.connections_closed_total.with_label_values(&["force-closed"]).inc(), + ConnectionError::Handler(NodeHandlerWrapperError::Handler(_)) => + metrics.connections_closed_total.with_label_values(&["protocol-error"]).inc(), + ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout) => + metrics.connections_closed_total.with_label_values(&["keep-alive-timeout"]).inc(), + } + } + }, + Poll::Ready(SwarmEvent::NewListenAddr(addr)) => { + trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", addr); + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_local_addresses.inc(); + } + }, + Poll::Ready(SwarmEvent::ExpiredListenAddr(addr)) => { + trace!(target: "sub-libp2p", "Libp2p => ExpiredListenAddr({})", addr); + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_local_addresses.dec(); + } + }, + Poll::Ready(SwarmEvent::UnreachableAddr { peer_id, address, error, .. }) => { + trace!( + target: "sub-libp2p", "Libp2p => Failed to reach {:?} through {:?}: {}", + peer_id, + address, + error, + ); + + if this.boot_node_ids.contains(&peer_id) { + if let PendingConnectionError::InvalidPeerId = error { + error!( + "💔 Invalid peer ID from bootnode, expected `{}` at address `{}`.", + peer_id, + address, + ); + } + } + + if let Some(metrics) = this.metrics.as_ref() { + match error { + PendingConnectionError::InvalidPeerId => + metrics.pending_connections_errors_total.with_label_values(&["invalid-peer-id"]).inc(), + PendingConnectionError::Transport(_) | PendingConnectionError::IO(_) => + metrics.pending_connections_errors_total.with_label_values(&["transport-error"]).inc(), + } + } + } + Poll::Ready(SwarmEvent::Dialing(peer_id)) => + trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id), + Poll::Ready(SwarmEvent::IncomingConnection { local_addr, send_back_addr }) => { + trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({},{}))", + local_addr, send_back_addr); + if let Some(metrics) = this.metrics.as_ref() { + metrics.incoming_connections_total.inc(); + } + }, + Poll::Ready(SwarmEvent::IncomingConnectionError { local_addr, send_back_addr, error }) => { + trace!(target: "sub-libp2p", "Libp2p => IncomingConnectionError({},{}): {}", + local_addr, send_back_addr, error); + if let Some(metrics) = this.metrics.as_ref() { + metrics.incoming_connections_errors_total.inc(); + } + }, + Poll::Ready(SwarmEvent::BannedPeer { peer_id, endpoint }) => { + trace!(target: "sub-libp2p", "Libp2p => BannedPeer({}). 
Connected via {:?}.", + peer_id, endpoint); + if let Some(metrics) = this.metrics.as_ref() { + metrics.incoming_connections_errors_total.inc(); + } + }, + Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) => + trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}", + address, error), + Poll::Ready(SwarmEvent::ListenerClosed { reason, addresses }) => { + warn!(target: "sub-libp2p", "Libp2p => ListenerClosed: {:?}", reason); + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_local_addresses.sub(addresses.len() as u64); } }, - Poll::Ready(SwarmEvent::Disconnected(peer_id)) => { - trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?})", peer_id); + Poll::Ready(SwarmEvent::ListenerError { error }) => { + trace!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error); if let Some(metrics) = this.metrics.as_ref() { - metrics.connections.dec(); + metrics.listeners_errors_total.inc(); } }, - Poll::Ready(SwarmEvent::NewListenAddr(addr)) => - trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", addr), - Poll::Ready(SwarmEvent::ExpiredListenAddr(addr)) => - trace!(target: "sub-libp2p", "Libp2p => ExpiredListenAddr({})", addr), - Poll::Ready(SwarmEvent::UnreachableAddr { peer_id, address, error }) => - trace!(target: "sub-libp2p", "Libp2p => Failed to reach {:?} through {:?}: {}", peer_id, address, error), - Poll::Ready(SwarmEvent::StartConnect(peer_id)) => - trace!(target: "sub-libp2p", "Libp2p => StartConnect({:?})", peer_id), }; } @@ -1021,6 +1256,7 @@ impl Future for NetworkWorker { metrics.peers_count.set(num_connected_peers as u64); metrics.peerset_num_discovered.set(this.network_service.user_protocol().num_discovered_peers() as u64); metrics.peerset_num_requested.set(this.network_service.user_protocol().requested_peers().count() as u64); + metrics.pending_connections.set(Swarm::network_info(&this.network_service).num_connections_pending as u64); } Poll::Pending @@ -1059,7 +1295,7 @@ impl<'a, B: BlockT, H: ExHashT> Link for NetworkLink<'a, B, H> { fn justification_imported(&mut self, who: PeerId, hash: &B::Hash, number: NumberFor, success: bool) { self.protocol.user_protocol_mut().justification_import_result(hash.clone(), number, success); if !success { - info!("Invalid justification provided by {} for #{}", who, hash); + info!("💔 Invalid justification provided by {} for #{}", who, hash); self.protocol.user_protocol_mut().disconnect_peer(&who); self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid justification")); } @@ -1079,7 +1315,7 @@ impl<'a, B: BlockT, H: ExHashT> Link for NetworkLink<'a, B, H> { let success = finalization_result.is_ok(); self.protocol.user_protocol_mut().finality_proof_import_result(request_block, finalization_result); if !success { - info!("Invalid finality proof provided by {} for #{}", who, request_block.0); + info!("💔 Invalid finality proof provided by {} for #{}", who, request_block.0); self.protocol.user_protocol_mut().disconnect_peer(&who); self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid finality proof")); } diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..0e097072e6c05961623fc9054ebe4a04512d0c1d --- /dev/null +++ b/client/network/src/service/tests.rs @@ -0,0 +1,270 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::{config, Event, NetworkService, NetworkWorker}; + +use futures::prelude::*; +use sp_runtime::traits::{Block as BlockT, Header as _}; +use std::{sync::Arc, time::Duration}; +use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; + +type TestNetworkService = NetworkService< + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::Hash, +>; + +/// Builds a full node to be used for testing. Returns the node service and its associated events +/// stream. +/// +/// > **Note**: We return the events stream in order to not possibly lose events between the +/// > construction of the service and the moment the events stream is grabbed. +fn build_test_full_node(config: config::NetworkConfiguration) + -> (Arc, impl Stream) +{ + let client = Arc::new( + TestClientBuilder::with_default_backend() + .build_with_longest_chain() + .0, + ); + + #[derive(Clone)] + struct PassThroughVerifier(bool); + impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + fn verify( + &mut self, + origin: sp_consensus::BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result< + ( + sp_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"babe")) + }) + }) + .map(|blob| { + vec![( + sp_blockchain::well_known_cache_keys::AUTHORITIES, + blob.to_vec(), + )] + }); + + let mut import = sp_consensus::BlockImportParams::new(origin, header); + import.body = body; + import.finalized = self.0; + import.justification = justification; + import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); + Ok((import, maybe_keys)) + } + } + + let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + None, + )); + + let worker = NetworkWorker::new(config::Params { + role: config::Role::Full, + executor: None, + network_config: config, + chain: client.clone(), + finality_proof_provider: None, + finality_proof_request_builder: None, + on_demand: None, + transaction_pool: Arc::new(crate::service::EmptyTransactionPool), + protocol_id: config::ProtocolId::from(&b"/test-protocol-name"[..]), + import_queue, + block_announce_validator: Box::new( + sp_consensus::block_validation::DefaultBlockAnnounceValidator::new(client.clone()), + ), + metrics_registry: None, + }) + .unwrap(); + + let service = worker.service().clone(); + let event_stream = service.event_stream(); + + async_std::task::spawn(async move { + futures::pin_mut!(worker); + let _ = worker.await; + }); + + (service, event_stream) +} + +const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; + +/// Builds two nodes and their associated events stream. 
+/// The nodes are connected together and have the `ENGINE_ID` protocol registered. +fn build_nodes_one_proto() + -> (Arc, impl Stream, Arc, impl Stream) +{ + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; + + let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + transport: config::TransportConfig::MemoryOnly, + .. config::NetworkConfiguration::new_local() + }); + + (node1, events_stream1, node2, events_stream2) +} + +#[test] +fn notifications_state_consistent() { + // Runs two nodes and ensures that events are propagated out of the API in a consistent + // correct order, which means no notification received on a closed substream. + + let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + + // Write some initial notifications that shouldn't get through. + for _ in 0..(rand::random::() % 5) { + node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + } + for _ in 0..(rand::random::() % 5) { + node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + } + + async_std::task::block_on(async move { + // True if we have an active substream from node1 to node2. + let mut node1_to_node2_open = false; + // True if we have an active substream from node2 to node1. + let mut node2_to_node1_open = false; + // We stop the test after a certain number of iterations. + let mut iterations = 0; + // Safe guard because we don't want the test to pass if no substream has been open. + let mut something_happened = false; + + loop { + iterations += 1; + if iterations >= 1_000 { + assert!(something_happened); + break; + } + + // Start by sending a notification from node1 to node2 and vice-versa. Part of the + // test consists in ensuring that notifications get ignored if the stream isn't open. + if rand::random::() % 5 >= 3 { + node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + } + if rand::random::() % 5 >= 3 { + node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); + } + + // Also randomly disconnect the two nodes from time to time. + if rand::random::() % 20 == 0 { + node1.disconnect_peer(node2.local_peer_id().clone()); + } + if rand::random::() % 20 == 0 { + node2.disconnect_peer(node1.local_peer_id().clone()); + } + + // Grab next event from either `events_stream1` or `events_stream2`. + let next_event = { + let next1 = events_stream1.next(); + let next2 = events_stream2.next(); + // We also await on a small timer, otherwise it is possible for the test to wait + // forever while nothing at all happens on the network. 
+ let continue_test = futures_timer::Delay::new(Duration::from_millis(20)); + match future::select(future::select(next1, next2), continue_test).await { + future::Either::Left((future::Either::Left((Some(ev), _)), _)) => + future::Either::Left(ev), + future::Either::Left((future::Either::Right((Some(ev), _)), _)) => + future::Either::Right(ev), + future::Either::Right(_) => continue, + _ => break, + } + }; + + match next_event { + future::Either::Left(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + something_happened = true; + assert!(!node1_to_node2_open); + node1_to_node2_open = true; + assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Right(Event::NotificationStreamOpened { remote, engine_id, .. }) => { + something_happened = true; + assert!(!node2_to_node1_open); + node2_to_node1_open = true; + assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Left(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + assert!(node1_to_node2_open); + node1_to_node2_open = false; + assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Right(Event::NotificationStreamClosed { remote, engine_id, .. }) => { + assert!(node2_to_node1_open); + node2_to_node1_open = false; + assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Left(Event::NotificationsReceived { remote, .. }) => { + assert!(node1_to_node2_open); + assert_eq!(remote, *node2.local_peer_id()); + if rand::random::() % 5 >= 4 { + node1.write_notification( + node2.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec() + ); + } + } + future::Either::Right(Event::NotificationsReceived { remote, .. }) => { + assert!(node2_to_node1_open); + assert_eq!(remote, *node1.local_peer_id()); + if rand::random::() % 5 >= 4 { + node2.write_notification( + node1.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec() + ); + } + } + + // Add new events here. 
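// Editor's sketch (not part of this patch): the "race the two event streams against a
// short timer" idiom used above, shown in isolation. Without the timer the test could
// wait forever when neither node emits anything. Only the `futures` and `futures-timer`
// crates are assumed; the helper name is illustrative.
use futures::{future, Stream, StreamExt};
use std::time::Duration;

async fn next_event_or_timeout<A, B>(
    stream1: &mut A,
    stream2: &mut B,
) -> Option<future::Either<A::Item, B::Item>>
where
    A: Stream + Unpin,
    B: Stream + Unpin,
{
    let timeout = futures_timer::Delay::new(Duration::from_millis(20));
    match future::select(future::select(stream1.next(), stream2.next()), timeout).await {
        // The first stream produced an event before the timer fired.
        future::Either::Left((future::Either::Left((Some(ev), _)), _)) =>
            Some(future::Either::Left(ev)),
        // The second stream produced an event before the timer fired.
        future::Either::Left((future::Either::Right((Some(ev), _)), _)) =>
            Some(future::Either::Right(ev)),
        // The timer fired first, or a stream ended; the caller decides what to do next.
        _ => None,
    }
}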
+ future::Either::Left(Event::Dht(_)) => {} + future::Either::Right(Event::Dht(_)) => {} + }; + } + }); +} diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 7fec4f4da8b47ccca2df942dadf729e8357583dd..90202008eb37fe99a1a98009bdce923ebe0dfc1c 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -10,22 +10,25 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sc-network = { version = "0.8.0-alpha.2", path = "../" } +sc-network = { version = "0.8.0-alpha.5", path = "../" } log = "0.4.8" parking_lot = "0.10.0" -futures = "0.3.1" +futures = "0.3.4" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.16.2", default-features = false, features = ["libp2p-websocket"] } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } -sc-client = { version = "0.8.0-alpha.2", path = "../../" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sc-block-builder = { version = "0.8.0-alpha.2", path = "../../block-builder" } -sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/babe" } +libp2p = { version = "0.17.0", default-features = false, features = ["libp2p-websocket"] } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } +sc-client = { version = "0.8.0-alpha.5", path = "../../" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../api" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../../block-builder" } +sp-consensus-babe = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/babe" } env_logger = "0.7.0" substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } substrate-test-runtime = { version = "2.0.0-dev", path = "../../../test-utils/runtime" } tempfile = "3.1.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index e98cf8bada0bcfd8ae431bb62fa9e7f2e357b0c3..fec4f1317b2dd2cc6c200e2939905c2e2f3aa63e 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -32,7 +32,7 @@ use sp_blockchain::{ use sc_client_api::{BlockchainEvents, BlockImportNotification, FinalityNotifications, ImportNotifications, FinalityNotification, backend::{TransactionFor, AuxStore, Backend, Finalizer}, BlockBackend}; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client::LongestChain; -use sc_network::config::Roles; +use sc_network::config::Role; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use sp_consensus::import_queue::{ BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport, @@ -41,7 +41,7 @@ use sp_consensus::block_import::{BlockImport, ImportResult}; use sp_consensus::Error as ConsensusError; use sp_consensus::{BlockOrigin, ForkChoiceStrategy, BlockImportParams, BlockCheckParams, JustificationImport}; use futures::prelude::*; -use 
sc_network::{NetworkWorker, NetworkStateInfo, NetworkService, config::ProtocolId}; +use sc_network::{NetworkWorker, NetworkService, config::ProtocolId}; use sc_network::config::{NetworkConfiguration, TransportConfig, BoxFinalityProofRequestBuilder}; use libp2p::PeerId; use parking_lot::Mutex; @@ -189,7 +189,7 @@ pub struct Peer { impl Peer { /// Get this peer ID. pub fn id(&self) -> PeerId { - self.network.service().local_peer_id() + self.network.service().local_peer_id().clone() } /// Returns true if we're major syncing. @@ -207,6 +207,11 @@ impl Peer { self.network.num_connected_peers() } + /// Returns the number of processed blocks. + pub fn num_processed_blocks(&self) -> usize { + self.network.num_processed_blocks() + } + /// Returns true if we have no peer. pub fn is_offline(&self) -> bool { self.num_peers() == 0 @@ -276,7 +281,8 @@ impl Peer { Default::default() }; self.block_import.import_block(import_block, cache).expect("block_import failed"); - self.network.on_block_imported(header, Vec::new(), true); + self.network.on_block_imported(header, true); + self.network.service().announce_block(hash, Vec::new()); at = hash; } @@ -551,17 +557,17 @@ pub trait TestNetFactory: Sized { for i in 0..n { trace!(target: "test_network", "Adding peer {}", i); - net.add_full_peer(&config); + net.add_full_peer(); } net } - fn add_full_peer(&mut self, config: &ProtocolConfig) { - self.add_full_peer_with_states(config, None) + fn add_full_peer(&mut self) { + self.add_full_peer_with_states(None) } /// Add a full peer. - fn add_full_peer_with_states(&mut self, config: &ProtocolConfig, keep_blocks: Option) { + fn add_full_peer_with_states(&mut self, keep_blocks: Option) { let test_client_builder = match keep_blocks { Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), None => TestClientBuilder::with_default_backend(), @@ -580,7 +586,7 @@ pub trait TestNetFactory: Sized { let verifier = self.make_verifier( PeersClient::Full(client.clone(), backend.clone()), - config, + &Default::default(), &data, ); let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); @@ -594,14 +600,19 @@ pub trait TestNetFactory: Sized { let listen_addr = build_multiaddr![Memory(rand::random::())]; + let mut network_config = NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + &std::env::current_dir().expect("current directory must exist"), + ); + network_config.transport = TransportConfig::MemoryOnly; + network_config.listen_addresses = vec![listen_addr.clone()]; + let network = NetworkWorker::new(sc_network::config::Params { - roles: config.roles, + role: Role::Full, executor: None, - network_config: NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - ..NetworkConfiguration::default() - }, + network_config, chain: client.clone(), finality_proof_provider: self.make_finality_proof_provider( PeersClient::Full(client.clone(), backend.clone()), @@ -617,7 +628,7 @@ pub trait TestNetFactory: Sized { self.mut_peers(|peers| { for peer in peers.iter_mut() { - peer.network.add_known_address(network.service().local_peer_id(), listen_addr.clone()); + peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); @@ -638,10 +649,7 @@ pub trait TestNetFactory: Sized { } /// Add a light peer. 
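// Editor's sketch (not part of this patch): the in-memory test network configuration
// that full and light test peers are now built with, extracted into a helper for
// illustration. It mirrors the calls added in the hunks above and below.
fn memory_only_network_config(listen_addr: libp2p::Multiaddr) -> sc_network::config::NetworkConfiguration {
    let mut network_config = sc_network::config::NetworkConfiguration::new(
        "test-node",
        "test-client",
        Default::default(),
        &std::env::current_dir().expect("current directory must exist"),
    );
    // Keep all traffic purely in-memory and listen on the given test address.
    network_config.transport = sc_network::config::TransportConfig::MemoryOnly;
    network_config.listen_addresses = vec![listen_addr];
    network_config
}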
- fn add_light_peer(&mut self, config: &ProtocolConfig) { - let mut config = config.clone(); - config.roles = Roles::LIGHT; - + fn add_light_peer(&mut self) { let (c, backend) = substrate_test_runtime_client::new_light(); let client = Arc::new(c); let ( @@ -654,7 +662,7 @@ pub trait TestNetFactory: Sized { let verifier = self.make_verifier( PeersClient::Light(client.clone(), backend.clone()), - &config, + &Default::default(), &data, ); let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); @@ -668,14 +676,19 @@ pub trait TestNetFactory: Sized { let listen_addr = build_multiaddr![Memory(rand::random::())]; + let mut network_config = NetworkConfiguration::new( + "test-node", + "test-client", + Default::default(), + &std::env::current_dir().expect("current directory must exist"), + ); + network_config.transport = TransportConfig::MemoryOnly; + network_config.listen_addresses = vec![listen_addr.clone()]; + let network = NetworkWorker::new(sc_network::config::Params { - roles: config.roles, + role: Role::Light, executor: None, - network_config: NetworkConfiguration { - listen_addresses: vec![listen_addr.clone()], - transport: TransportConfig::MemoryOnly, - ..NetworkConfiguration::default() - }, + network_config, chain: client.clone(), finality_proof_provider: self.make_finality_proof_provider( PeersClient::Light(client.clone(), backend.clone()) @@ -691,7 +704,7 @@ pub trait TestNetFactory: Sized { self.mut_peers(|peers| { for peer in peers.iter_mut() { - peer.network.add_known_address(network.service().local_peer_id(), listen_addr.clone()); + peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); } let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); @@ -780,9 +793,9 @@ pub trait TestNetFactory: Sized { while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { peer.network.on_block_imported( notification.header, - Vec::new(), true, ); + peer.network.service().announce_block(notification.hash, Vec::new()); } // We poll `finality_notification_stream`, but we only take the last event. diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 388257516832002de97775bbeb21c6696eaae38c..8acf265e9189288758471f69db6873b0418a3edd 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sc_network::config::Roles; use sp_consensus::BlockOrigin; use std::time::Duration; use futures::executor::block_on; @@ -372,10 +371,8 @@ fn blocks_are_not_announced_by_light_nodes() { // full peer0 is connected to light peer // light peer1 is connected to full peer2 - let mut light_config = ProtocolConfig::default(); - light_config.roles = Roles::LIGHT; - net.add_full_peer(&ProtocolConfig::default()); - net.add_light_peer(&light_config); + net.add_full_peer(); + net.add_light_peer(); // Sync between 0 and 1. net.peer(0).push_blocks(1, false); @@ -384,7 +381,7 @@ fn blocks_are_not_announced_by_light_nodes() { assert_eq!(net.peer(1).client.info().best_number, 1); // Add another node and remove node 0. - net.add_full_peer(&ProtocolConfig::default()); + net.add_full_peer(); net.peers.remove(0); // Poll for a few seconds and make sure 1 and 2 (now 0 and 1) don't sync together. 
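// Editor's sketch (not part of this patch): the import/announce split applied in the
// hunks above. After this change `on_block_imported` only updates the sync state;
// making the block known to peers is a separate, explicit `announce_block` call on
// the service. The types are the test-runtime ones already used in these files.
type TestBlock = substrate_test_runtime_client::runtime::Block;
type TestHash = substrate_test_runtime_client::runtime::Hash;

fn import_then_announce(
    network: &mut sc_network::NetworkWorker<TestBlock, TestHash>,
    header: <TestBlock as sp_runtime::traits::Block>::Header,
    hash: TestHash,
) {
    // Record the freshly imported block for the sync state machine ...
    network.on_block_imported(header, true);
    // ... and separately announce it to connected peers (no associated data).
    network.service().announce_block(hash, Vec::new());
}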
@@ -465,7 +462,7 @@ fn can_not_sync_from_light_peer() { // given the network with 1 full nodes (#0) and 1 light node (#1) let mut net = TestNet::new(1); - net.add_light_peer(&Default::default()); + net.add_light_peer(); // generate some blocks on #0 net.peer(0).push_blocks(1, false); @@ -481,7 +478,7 @@ fn can_not_sync_from_light_peer() { assert_eq!(light_info.best_hash, full0_info.best_hash); // add new full client (#2) && remove #0 - net.add_full_peer(&Default::default()); + net.add_full_peer(); net.peers.remove(0); // ensure that the #2 (now #1) fails to sync block #1 even after 5 seconds @@ -511,7 +508,7 @@ fn light_peer_imports_header_from_announce() { // given the network with 1 full nodes (#0) and 1 light node (#1) let mut net = TestNet::new(1); - net.add_light_peer(&Default::default()); + net.add_light_peer(); // let them connect to each other net.block_until_sync(); @@ -583,9 +580,8 @@ fn can_sync_explicit_forks() { fn syncs_header_only_forks() { let _ = ::env_logger::try_init(); let mut net = TestNet::new(0); - let config = ProtocolConfig::default(); - net.add_full_peer_with_states(&config, None); - net.add_full_peer_with_states(&config, Some(3)); + net.add_full_peer_with_states(None); + net.add_full_peer_with_states(Some(3)); net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); @@ -657,3 +653,69 @@ fn full_sync_requires_block_body() { net.block_until_idle(); assert_eq!(net.peer(1).client.info().best_number, 0); } + +#[test] +fn imports_stale_once() { + let _ = ::env_logger::try_init(); + + fn import_with_announce(net: &mut TestNet, hash: H256) { + // Announce twice + net.peer(0).announce_block(hash, Vec::new()); + net.peer(0).announce_block(hash, Vec::new()); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + } + + // given the network with 2 full nodes + let mut net = TestNet::new(2); + + // let them connect to each other + net.block_until_sync(); + + // check that NEW block is imported from announce message + let new_hash = net.peer(0).push_blocks(1, false); + import_with_announce(&mut net, new_hash); + assert_eq!(net.peer(1).num_processed_blocks(), 1); + + // check that KNOWN STALE block is imported from announce message + let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); + import_with_announce(&mut net, known_stale_hash); + assert_eq!(net.peer(1).num_processed_blocks(), 2); +} + +#[test] +fn can_sync_to_peers_with_wrong_common_block() { + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(2); + + net.peer(0).push_blocks(2, true); + net.peer(1).push_blocks(2, true); + let fork_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 2, false); + net.peer(1).push_blocks_at(BlockId::Number(0), 2, false); + // wait for connection + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + + // both peers re-org to the same fork without notifying each other + net.peer(0).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); + net.peer(1).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); + let final_hash = net.peer(0).push_blocks(1, false); + + net.block_until_sync(); + + assert!(net.peer(1).client().header(&BlockId::Hash(final_hash)).unwrap().is_some()); +} + diff --git 
a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 963663ff3ecc5de82401d202823ac490fdfa840b..e7292439e82eb0d4ad27e9f64c897c764b2f18f4 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers" name = "sc-offchain" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" @@ -10,22 +10,23 @@ repository = "https://github.com/paritytech/substrate/" [dependencies] bytes = "0.5" -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } fnv = "1.0.6" -futures = "0.3.1" +futures = "0.3.4" futures-timer = "3.0.1" log = "0.4.8" threadpool = "1.7" num_cpus = "1.10" -sp-offchain = { version = "2.0.0-alpha.2", path = "../../primitives/offchain" } -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +sp-offchain = { version = "2.0.0-alpha.5", path = "../../primitives/offchain" } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } parking_lot = "0.10.0" -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } rand = "0.7.2" -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sc-network = { version = "0.8.0-alpha.2", path = "../network" } -sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils" } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../keystore" } [target.'cfg(not(target_os = "unknown"))'.dependencies] hyper = "0.13.2" @@ -33,12 +34,15 @@ hyper-rustls = "0.20" [dev-dependencies] env_logger = "0.7.0" -fdlimit = "0.1" -sc-client-db = { version = "0.8.0-alpha.2", default-features = true, path = "../db/" } -sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } +fdlimit = "0.1.4" +sc-client-db = { version = "0.8.0-alpha.5", default-features = true, path = "../db/" } +sc-transaction-pool = { version = "2.0.0-alpha.5", path = "../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../primitives/transaction-pool" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } tokio = "0.2" [features] default = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index 7923a767f11f0577828168592360894e2a4e53ab..a64fe0389706c1098eda9f1c2fbd2294843d7597 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -32,11 +32,12 @@ use futures::{prelude::*, future, channel::mpsc}; use log::error; use sp_core::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; use std::{fmt, io::Read as _, mem, pin::Pin, task::Context, task::Poll}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; /// Creates a pair of [`HttpApi`] and [`HttpWorker`]. 
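// Editor's sketch (not part of this patch): the channel swap applied below and in
// several other files of this change set. `sp_utils::mpsc::tracing_unbounded` is used
// in place of `futures::channel::mpsc::unbounded` and takes a static label under which
// the queue can be tracked. The payload type and label here are illustrative.
use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender};

fn example_channel() -> (TracingUnboundedSender<String>, TracingUnboundedReceiver<String>) {
    // Same shape as `mpsc::unbounded()`, plus a name for diagnostics.
    tracing_unbounded("mpsc_example")
}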
pub fn http() -> (HttpApi, HttpWorker) { - let (to_worker, from_api) = mpsc::unbounded(); - let (to_api, from_worker) = mpsc::unbounded(); + let (to_worker, from_api) = tracing_unbounded("mpsc_ocw_to_worker"); + let (to_api, from_worker) = tracing_unbounded("mpsc_ocw_to_api"); let api = HttpApi { to_worker, @@ -63,10 +64,10 @@ pub fn http() -> (HttpApi, HttpWorker) { /// to offchain workers. pub struct HttpApi { /// Used to sends messages to the worker. - to_worker: mpsc::UnboundedSender, + to_worker: TracingUnboundedSender, /// Used to receive messages from the worker. /// We use a `Fuse` in order to have an extra protection against panicking. - from_worker: stream::Fuse>, + from_worker: stream::Fuse>, /// Id to assign to the next HTTP request that is started. next_id: HttpRequestId, /// List of HTTP requests in preparation or in progress. @@ -546,9 +547,9 @@ enum WorkerToApi { /// Must be continuously polled for the [`HttpApi`] to properly work. pub struct HttpWorker { /// Used to sends messages to the `HttpApi`. - to_api: mpsc::UnboundedSender, + to_api: TracingUnboundedSender, /// Used to receive messages from the `HttpApi`. - from_api: mpsc::UnboundedReceiver, + from_api: TracingUnboundedReceiver, /// The engine that runs HTTP requests. http_client: hyper::Client, hyper::Body>, /// HTTP requests that are being worked on by the engine. diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 27a7f508459ba1bbc3cc4ba0eef0ed2a7c5f9770..94850e3fd3461c3cf79ecd48e905051991324941 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -191,7 +191,8 @@ mod tests { at: &BlockId, extrinsic: ::Extrinsic, ) -> Result<(), ()> { - futures::executor::block_on(self.0.submit_one(&at, extrinsic)) + let source = sp_transaction_pool::TransactionSource::Local; + futures::executor::block_on(self.0.submit_one(&at, source, extrinsic)) .map(|_| ()) .map_err(|_| ()) } diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index 9e76b8015afcadf897faf017a5ba935270cd5eef..e026c6063a01fbd9e6716d0a3fe3b59cf23302eb 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -3,7 +3,7 @@ description = "Connectivity manager based on reputation" homepage = "http://parity.io" license = "GPL-3.0" name = "sc-peerset" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" repository = "https://github.com/paritytech/substrate/" @@ -11,11 +11,15 @@ documentation = "https://docs.rs/sc-peerset" [dependencies] -futures = "0.3.1" -libp2p = { version = "0.16.2", default-features = false } +futures = "0.3.4" +libp2p = { version = "0.17.0", default-features = false } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils"} log = "0.4.8" serde_json = "1.0.41" wasm-timer = "0.2" [dev-dependencies] rand = "0.7.2" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 87ed2336aea9c65da74aafbc53216ce0a6ffaff7..9376e9594b4b702dc3746661196065e4851c78b9 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -20,11 +20,12 @@ mod peersstate; use std::{collections::{HashSet, HashMap}, collections::VecDeque}; -use futures::{prelude::*, channel::mpsc}; +use futures::prelude::*; use log::{debug, error, trace}; use serde_json::json; use std::{pin::Pin, task::{Context, Poll}, time::Duration}; use wasm_timer::Instant; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; pub 
use libp2p::PeerId; @@ -73,7 +74,7 @@ impl ReputationChange { /// Shared handle to the peer set manager (PSM). Distributed around the code. #[derive(Debug, Clone)] pub struct PeersetHandle { - tx: mpsc::UnboundedSender, + tx: TracingUnboundedSender, } impl PeersetHandle { @@ -163,14 +164,14 @@ pub struct PeersetConfig { /// > otherwise it will not be able to connect to them. pub bootnodes: Vec, - /// If true, we only accept reserved nodes. + /// If true, we only accept nodes in [`PeersetConfig::priority_groups`]. pub reserved_only: bool, - /// List of nodes that we should always be connected to. + /// Lists of nodes we should always be connected to. /// /// > **Note**: Keep in mind that the networking has to know an address for these nodes, /// > otherwise it will not be able to connect to them. - pub reserved_nodes: Vec, + pub priority_groups: Vec<(String, HashSet)>, } /// Side of the peer set manager owned by the network. In other words, the "receiving" side. @@ -183,9 +184,9 @@ pub struct Peerset { /// If true, we only accept reserved nodes. reserved_only: bool, /// Receiver for messages from the `PeersetHandle` and from `tx`. - rx: mpsc::UnboundedReceiver, + rx: TracingUnboundedReceiver, /// Sending side of `rx`. - tx: mpsc::UnboundedSender, + tx: TracingUnboundedSender, /// Queue of messages to be emitted when the `Peerset` is polled. message_queue: VecDeque, /// When the `Peerset` was created. @@ -197,7 +198,7 @@ pub struct Peerset { impl Peerset { /// Builds a new peerset from the given configuration. pub fn from_config(config: PeersetConfig) -> (Peerset, PeersetHandle) { - let (tx, rx) = mpsc::unbounded(); + let (tx, rx) = tracing_unbounded("mpsc_peerset_messages"); let handle = PeersetHandle { tx: tx.clone(), @@ -215,7 +216,10 @@ impl Peerset { latest_time_update: now, }; - peerset.data.set_priority_group(RESERVED_NODES, config.reserved_nodes.into_iter().collect()); + for (group, nodes) in config.priority_groups { + peerset.data.set_priority_group(&group, nodes); + } + for peer_id in config.bootnodes { if let peersstate::Peer::Unknown(entry) = peerset.data.peer(&peer_id) { entry.discover(); @@ -597,7 +601,7 @@ mod tests { out_peers: 2, bootnodes: vec![bootnode], reserved_only: true, - reserved_nodes: Vec::new(), + priority_groups: Vec::new(), }; let (peerset, handle) = Peerset::from_config(config); @@ -625,7 +629,7 @@ mod tests { out_peers: 1, bootnodes: vec![bootnode.clone()], reserved_only: false, - reserved_nodes: Vec::new(), + priority_groups: Vec::new(), }; let (mut peerset, _handle) = Peerset::from_config(config); @@ -652,7 +656,7 @@ mod tests { out_peers: 2, bootnodes: vec![bootnode.clone()], reserved_only: false, - reserved_nodes: vec![], + priority_groups: vec![], }; let (mut peerset, _handle) = Peerset::from_config(config); @@ -673,7 +677,7 @@ mod tests { out_peers: 25, bootnodes: vec![], reserved_only: false, - reserved_nodes: vec![], + priority_groups: vec![], }); // We ban a node by setting its reputation under the threshold. diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs index c2b0b44a3a95d43f30210efc73563e4e0ae520a3..44477cec6589dbf2b6ce5922ae4ac3217fe9e973 100644 --- a/client/peerset/tests/fuzz.rs +++ b/client/peerset/tests/fuzz.rs @@ -43,12 +43,15 @@ fn test_once() { known_nodes.insert(id.clone()); id }).collect(), - reserved_nodes: (0 .. 
Uniform::new_inclusive(0, 2).sample(&mut rng)).map(|_| { - let id = PeerId::random(); - known_nodes.insert(id.clone()); - reserved_nodes.insert(id.clone()); - id - }).collect(), + priority_groups: { + let list = (0 .. Uniform::new_inclusive(0, 2).sample(&mut rng)).map(|_| { + let id = PeerId::random(); + known_nodes.insert(id.clone()); + reserved_nodes.insert(id.clone()); + id + }).collect(); + vec![("reserved".to_owned(), list)] + }, reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index 0f3b72e519ba5cbff72236a885deb54101347594..10b4b1746e2f6834723968a38778a5293db413b4 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-api" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,19 +9,23 @@ repository = "https://github.com/paritytech/substrate/" description = "Substrate RPC interfaces." [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0" } +codec = { package = "parity-scale-codec", version = "1.3.0" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } jsonrpc-core = "14.0.3" -jsonrpc-core-client = "14.0.3" +jsonrpc-core-client = "14.0.5" jsonrpc-derive = "14.0.3" jsonrpc-pubsub = "14.0.3" log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-version = { version = "2.0.0-alpha.2", path = "../../primitives/version" } -sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0-alpha.2"} +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-version = { version = "2.0.0-alpha.5", path = "../../primitives/version" } +sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0-alpha.5"} +sp-chain-spec = { path = "../../primitives/chain-spec" , version = "2.0.0-alpha.5"} serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } -sp-rpc = { version = "2.0.0-alpha.2", path = "../../primitives/rpc" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../primitives/transaction-pool" } +sp-rpc = { version = "2.0.0-alpha.5", path = "../../primitives/rpc" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index b2cf8ce909b20fead05a3be7bdb9d0d93d6fade1..d29e46a4b5637cf85d0745d59a73b0ad69afde7c 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -136,6 +136,14 @@ pub trait StateApi { hash: Option ) -> FutureResult>>; + /// Query storage entries (by key) starting at block hash given as the second parameter. 
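// Editor's sketch (not part of this patch): how the new `priority_groups` field of
// `PeersetConfig` (see the sc-peerset hunks above) expresses what `reserved_nodes`
// used to: reserved peers become a named priority group. Field values are illustrative.
use std::collections::HashSet;

fn peerset_config_with_reserved(
    reserved: HashSet<sc_peerset::PeerId>,
) -> sc_peerset::PeersetConfig {
    sc_peerset::PeersetConfig {
        in_peers: 25,
        out_peers: 25,
        bootnodes: Vec::new(),
        // When set, only peers from priority groups are accepted.
        reserved_only: false,
        priority_groups: vec![("reserved".to_owned(), reserved)],
    }
}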
+ #[rpc(name = "state_queryStorageAt")] + fn query_storage_at( + &self, + keys: Vec, + at: Option, + ) -> FutureResult>>; + /// New runtime version subscription #[pubsub( subscription = "state_runtimeVersion", diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index 572136aeb6c40f515276c5c727798602b09559fd..46461d69888c18b86b4de44eb9c1f4941547a103 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -18,10 +18,7 @@ use std::fmt; use serde::{Serialize, Deserialize}; -use serde_json::{Value, map::Map}; - -/// Node properties -pub type Properties = Map; +use sp_chain_spec::{Properties, ChainType}; /// Running node's static details. #[derive(Clone, Debug)] @@ -34,6 +31,8 @@ pub struct SystemInfo { pub chain_name: String, /// A custom set of properties defined in the chain spec. pub properties: Properties, + /// The type of this chain. + pub chain_type: ChainType, } /// Health struct returned by the RPC @@ -83,8 +82,8 @@ pub enum NodeRole { LightClient, /// The node is an authority Authority, - /// An unknown role with a bit number - UnknownRole(u8) + /// The node is a sentry + Sentry, } #[cfg(test)] diff --git a/client/rpc-api/src/system/mod.rs b/client/rpc-api/src/system/mod.rs index f12a11e0b36dd7edf14c80e5a8d99e3f995f8b0e..25f147b694b47c07a0c097cbeae5ba217d60d6cc 100644 --- a/client/rpc-api/src/system/mod.rs +++ b/client/rpc-api/src/system/mod.rs @@ -25,7 +25,7 @@ use futures::{future::BoxFuture, compat::Compat}; use self::error::Result as SystemResult; -pub use self::helpers::{Properties, SystemInfo, Health, PeerInfo, NodeRole}; +pub use self::helpers::{SystemInfo, Health, PeerInfo, NodeRole}; pub use self::gen_client::Client as SystemClient; /// Substrate system RPC API @@ -39,13 +39,17 @@ pub trait SystemApi { #[rpc(name = "system_version")] fn system_version(&self) -> SystemResult; - /// Get the chain's type. Given as a string identifier. + /// Get the chain's name. Given as a string identifier. #[rpc(name = "system_chain")] fn system_chain(&self) -> SystemResult; + /// Get the chain's type. + #[rpc(name = "system_chainType")] + fn system_type(&self) -> SystemResult; + /// Get a custom set of properties as a JSON object, defined in the chain spec. #[rpc(name = "system_properties")] - fn system_properties(&self) -> SystemResult; + fn system_properties(&self) -> SystemResult; /// Return health status of the node. 
/// diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index 79d2984c2293e0649456ce5453f0dd719cfe77ad..c834d7dbf7396b89a6a72ccaaf7feca3059e7507 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc-server" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -14,8 +14,11 @@ pubsub = { package = "jsonrpc-pubsub", version = "14.0.3" } log = "0.4.8" serde = "1.0.101" serde_json = "1.0.41" -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } [target.'cfg(not(target_os = "unknown"))'.dependencies] http = { package = "jsonrpc-http-server", version = "14.0.3" } ws = { package = "jsonrpc-ws-server", version = "14.0.3" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index f99e25b14ee2a9bb7905a3ab42cf119af05a728e..f852c452d726fb133920eb7db3a1399dc1409e0e 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-rpc" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,37 +9,41 @@ repository = "https://github.com/paritytech/substrate/" description = "Substrate Client RPC" [dependencies] -sc-rpc-api = { version = "0.8.0-alpha.2", path = "../rpc-api" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sc-client = { version = "0.8.0-alpha.2", path = "../" } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.2.0" } +sc-rpc-api = { version = "0.8.0-alpha.5", path = "../rpc-api" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sc-client = { version = "0.8.0-alpha.5", path = "../" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } +codec = { package = "parity-scale-codec", version = "1.3.0" } futures = { version = "0.3.1", features = ["compat"] } jsonrpc-pubsub = "14.0.3" log = "0.4.8" -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } rpc = { package = "jsonrpc-core", version = "14.0.3" } -sp-version = { version = "2.0.0-alpha.2", path = "../../primitives/version" } +sp-version = { version = "2.0.0-alpha.5", path = "../../primitives/version" } serde_json = "1.0.41" -sp-session = { version = "2.0.0-alpha.2", path = "../../primitives/session" } -sp-offchain = { version = "2.0.0-alpha.2", path = "../../primitives/offchain" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sp-rpc = { version = "2.0.0-alpha.2", path = "../../primitives/rpc" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } -sc-executor = { version = "0.8.0-alpha.2", path = "../executor" } -sc-block-builder = { version = "0.8.0-alpha.2", path = "../../client/block-builder" } -sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sp-session = { version = "2.0.0-alpha.5", path = "../../primitives/session" } +sp-offchain = { version = "2.0.0-alpha.5", path = "../../primitives/offchain" } 
+sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils" } +sp-rpc = { version = "2.0.0-alpha.5", path = "../../primitives/rpc" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } +sp-chain-spec = { version = "2.0.0-alpha.5", path = "../../primitives/chain-spec" } +sc-executor = { version = "0.8.0-alpha.5", path = "../executor" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../../client/block-builder" } +sc-keystore = { version = "2.0.0-alpha.5", path = "../keystore" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } hash-db = { version = "0.15.2", default-features = false } parking_lot = "0.10.0" [dev-dependencies] assert_matches = "1.3.0" futures01 = { package = "futures", version = "0.1.29" } -sc-network = { version = "0.8.0-alpha.2", path = "../network" } -rustc-hex = "2.0.1" -sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } +sp-io = { version = "2.0.0-alpha.5", path = "../../primitives/io" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } tokio = "0.1.22" -sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../transaction-pool" } +sc-transaction-pool = { version = "2.0.0-alpha.5", path = "../transaction-pool" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 80a3a4349ed82f5fa2ebe04d96cf24d28e18ae5e..a3f23e8e1437a146932410d08ce6835b40907a6d 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -37,7 +37,7 @@ use sp_core::{Bytes, traits::BareCryptoStorePtr}; use sp_api::ProvideRuntimeApi; use sp_runtime::generic; use sp_transaction_pool::{ - TransactionPool, InPoolTransaction, TransactionStatus, + TransactionPool, InPoolTransaction, TransactionStatus, TransactionSource, BlockHash, TxHash, TransactionFor, error::IntoPoolError, }; use sp_session::SessionKeys; @@ -75,6 +75,14 @@ impl Author { } } + +/// Currently we treat all RPC transactions as externals. +/// +/// Possibly in the future we could allow opt-in for special treatment +/// of such transactions, so that the block authors can inject +/// some unique transactions via RPC and have them included in the pool. +const TX_SOURCE: TransactionSource = TransactionSource::External; + impl AuthorApi, BlockHash

> for Author where P: TransactionPool + Sync + Send + 'static, @@ -127,7 +135,7 @@ impl AuthorApi, BlockHash

> for Author }; let best_block_hash = self.client.info().best_hash; Box::new(self.pool - .submit_one(&generic::BlockId::hash(best_block_hash), xt) + .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) .compat() .map_err(|e| e.into_pool_error() .map(Into::into) @@ -173,7 +181,7 @@ impl AuthorApi, BlockHash

> for Author .map_err(error::Error::from)?; Ok( self.pool - .submit_and_watch(&generic::BlockId::hash(best_block_hash), dxt) + .submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt) .map_err(|e| e.into_pool_error() .map(error::Error::from) .unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into()) diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index 3093cd9d3b759b08d887eb10165e26f697a9a073..8b956c23a5e660cd685c9d6381395068decb4ebc 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -21,7 +21,8 @@ use assert_matches::assert_matches; use codec::Encode; use sp_core::{ H256, blake2_256, hexdisplay::HexDisplay, testing::{ED25519, SR25519, KeyStore}, - traits::BareCryptoStorePtr, ed25519, crypto::{Pair, Public}, + traits::BareCryptoStorePtr, ed25519, sr25519, + crypto::{CryptoTypePublicPair, Pair, Public}, }; use rpc::futures::Stream as _; use substrate_test_runtime_client::{ @@ -173,7 +174,7 @@ fn should_return_pending_extrinsics() { let ex = uxt(AccountKeyring::Alice, 0); AuthorApi::submit_extrinsic(&p, ex.encode().into()).wait().unwrap(); - assert_matches!( + assert_matches!( p.pending_extrinsics(), Ok(ref expected) if *expected == vec![Bytes(ex.encode())] ); @@ -199,7 +200,7 @@ fn should_remove_extrinsics() { hash::ExtrinsicOrHash::Extrinsic(ex1.encode().into()), ]).unwrap(); - assert_eq!(removed.len(), 3); + assert_eq!(removed.len(), 3); } #[test] @@ -215,10 +216,9 @@ fn should_insert_key() { key_pair.public().0.to_vec().into(), ).expect("Insert key"); - let store_key_pair = setup.keystore.read() - .ed25519_key_pair(ED25519, &key_pair.public()).expect("Key exists in store"); + let public_keys = setup.keystore.read().keys(ED25519).unwrap(); - assert_eq!(key_pair.public(), store_key_pair.public()); + assert!(public_keys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, key_pair.public().to_raw_vec()))); } #[test] @@ -231,18 +231,11 @@ fn should_rotate_keys() { let session_keys = SessionKeys::decode(&mut &new_public_keys[..]) .expect("SessionKeys decode successfully"); - let ed25519_key_pair = setup.keystore.read().ed25519_key_pair( - ED25519, - &session_keys.ed25519.clone().into(), - ).expect("ed25519 key exists in store"); - - let sr25519_key_pair = setup.keystore.read().sr25519_key_pair( - SR25519, - &session_keys.sr25519.clone().into(), - ).expect("sr25519 key exists in store"); + let ed25519_public_keys = setup.keystore.read().keys(ED25519).unwrap(); + let sr25519_public_keys = setup.keystore.read().keys(SR25519).unwrap(); - assert_eq!(session_keys.ed25519, ed25519_key_pair.public().into()); - assert_eq!(session_keys.sr25519, sr25519_key_pair.public().into()); + assert!(ed25519_public_keys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, session_keys.ed25519.to_raw_vec()))); + assert!(sr25519_public_keys.contains(&CryptoTypePublicPair(sr25519::CRYPTO_ID, session_keys.sr25519.to_raw_vec()))); } #[test] diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 82568866ee3ba4df6686d9201783f7cf8cfa673e..2747405c04fcac0c7534ea802bb5e1f08a7e1911 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -163,6 +163,13 @@ pub trait StateBackend: Send + Sync + 'static keys: Vec, ) -> FutureResult>>; + /// Query storage entries (by key) starting at block hash given as the second parameter. 
+ fn query_storage_at( + &self, + keys: Vec, + at: Option + ) -> FutureResult>>; + /// New runtime version subscription fn subscribe_runtime_version( &self, @@ -357,6 +364,14 @@ impl StateApi for State self.backend.query_storage(from, to, keys) } + fn query_storage_at( + &self, + keys: Vec, + at: Option + ) -> FutureResult>> { + self.backend.query_storage_at(keys, at) + } + fn subscribe_storage( &self, meta: Self::Metadata, diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index b7589d2aefecafc9daa41e2122461c21f53c93d2..bf80820543102483ae5c41a301ad1ab0d322890d 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -33,7 +33,7 @@ use sp_core::{ }; use sp_version::RuntimeVersion; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, NumberFor, SaturatedConversion}, + generic::BlockId, traits::{Block as BlockT, NumberFor, SaturatedConversion, CheckedSub}, }; use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; @@ -94,8 +94,8 @@ impl FullState let from_meta = self.client.header_metadata(from).map_err(invalid_block_err)?; let to_meta = self.client.header_metadata(to).map_err(invalid_block_err)?; - if from_meta.number >= to_meta.number { - return Err(invalid_block_range(&from_meta, &to_meta, "from number >= to number".to_owned())) + if from_meta.number > to_meta.number { + return Err(invalid_block_range(&from_meta, &to_meta, "from number > to number".to_owned())) } // check if we can get from `to` to `from` by going through parent_hashes. @@ -122,7 +122,10 @@ impl FullState .max_key_changes_range(from_number, BlockId::Hash(to_meta.hash)) .map_err(client_err)?; let filtered_range_begin = changes_trie_range - .map(|(begin, _)| (begin - from_number).saturated_into::()); + .and_then(|(begin, _)| { + // avoids a corner case where begin < from_number (happens when querying genesis) + begin.checked_sub(&from_number).map(|x| x.saturated_into::()) + }); let (unfiltered_range, filtered_range) = split_range(hashes.len(), filtered_range_begin); Ok(QueryStorageRange { @@ -398,6 +401,15 @@ impl StateBackend for FullState, + at: Option + ) -> FutureResult>> { + let at = at.unwrap_or_else(|| self.client.info().best_hash); + self.query_storage(at, Some(at), keys) + } + fn subscribe_runtime_version( &self, _meta: crate::metadata::Metadata, diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 59c0f2183cf898f0677ec9e39b843378244a7fda..092419ad0129e6f4109a19d3f916dc0eb24f5738 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -331,6 +331,14 @@ impl StateBackend for LightState, + _at: Option + ) -> FutureResult>> { + Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) + } + fn subscribe_storage( &self, _meta: crate::metadata::Metadata, diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 75ce4ed9c3c4d7e89a4bfb8d691e9a06d2654c7c..4a9b701959c8c292a3b4dba25d89762fedfd8f62 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -30,6 +30,7 @@ use substrate_test_runtime_client::{ sp_consensus::BlockOrigin, runtime, }; +use sp_runtime::generic::BlockId; const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id"); @@ -212,7 +213,7 @@ fn should_send_initial_storage_changes_and_notifications() { #[test] fn should_query_storage() { - fn run_tests(mut client: Arc) { + fn run_tests(mut client: Arc, has_changes_trie_config: bool) { let core = 
tokio::runtime::Runtime::new().unwrap(); let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); @@ -237,6 +238,13 @@ fn should_query_storage() { let block2_hash = add_block(1); let genesis_hash = client.genesis_hash(); + if has_changes_trie_config { + assert_eq!( + client.max_key_changes_range(1, BlockId::Hash(block1_hash)).unwrap(), + Some((0, BlockId::Hash(block1_hash))), + ); + } + let mut expected = vec![ StorageChangeSet { block: genesis_hash, @@ -306,7 +314,7 @@ fn should_query_storage() { Err(Error::InvalidBlockRange { from: format!("1 ({:?})", block1_hash), to: format!("0 ({:?})", genesis_hash), - details: "from number >= to number".to_owned(), + details: "from number > to number".to_owned(), }).map_err(|e| e.to_string()) ); @@ -376,12 +384,39 @@ fn should_query_storage() { details: format!("UnknownBlock: header not found in db: {}", random_hash1), }).map_err(|e| e.to_string()), ); + + // single block range + let result = api.query_storage_at( + keys.clone(), + Some(block1_hash), + ); + + assert_eq!( + result.wait().unwrap(), + vec![ + StorageChangeSet { + block: block1_hash, + changes: vec![ + (StorageKey(vec![1_u8]), None), + (StorageKey(vec![2_u8]), Some(StorageData(vec![2_u8]))), + (StorageKey(vec![3_u8]), Some(StorageData(vec![3_u8]))), + (StorageKey(vec![4_u8]), None), + (StorageKey(vec![5_u8]), Some(StorageData(vec![0_u8]))), + ] + } + ] + ); } - run_tests(Arc::new(substrate_test_runtime_client::new())); - run_tests(Arc::new(TestClientBuilder::new() - .changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2))) - .build())); + run_tests(Arc::new(substrate_test_runtime_client::new()), false); + run_tests( + Arc::new( + TestClientBuilder::new() + .changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2))) + .build(), + ), + true, + ); } #[test] @@ -403,7 +438,7 @@ fn should_return_runtime_version() { let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",2],\ - [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",1],[\"0x40fe3ad401f8959a\",4],\ + [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",2],[\"0x40fe3ad401f8959a\",4],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",1],\ [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]]}"; diff --git a/client/rpc/src/system/mod.rs b/client/rpc/src/system/mod.rs index 3a9ed9f2dcc58a2ad8b3e4064014883085443199..e18d4d09a1f1a6a8e4ba472b982116818c7f2037 100644 --- a/client/rpc/src/system/mod.rs +++ b/client/rpc/src/system/mod.rs @@ -20,20 +20,21 @@ mod tests; use futures::{future::BoxFuture, FutureExt, TryFutureExt}; -use futures::{channel::{mpsc, oneshot}, compat::Compat}; +use futures::{channel::oneshot, compat::Compat}; use sc_rpc_api::Receiver; +use sp_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{self, Header as HeaderT}; use self::error::Result; pub use sc_rpc_api::system::*; -pub use self::helpers::{Properties, SystemInfo, Health, PeerInfo, NodeRole}; +pub use self::helpers::{SystemInfo, Health, PeerInfo, NodeRole}; pub use self::gen_client::Client as SystemClient; /// System API implementation pub struct System { info: SystemInfo, - send_back: mpsc::UnboundedSender>, + send_back: TracingUnboundedSender>, } /// Request to be processed. @@ -59,7 +60,7 @@ impl System { /// reading from that channel and answering the requests. 
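// Editor's sketch (not part of this patch): the request/answer pattern behind
// `send_back` below. The RPC handler pushes a request carrying a oneshot sender onto
// the (now traced) unbounded channel; whoever drains the channel answers through that
// sender. `ExampleRequest` and the payload type are illustrative, and the sketch
// assumes `TracingUnboundedSender` exposes the same `unbounded_send` as the futures
// sender it wraps.
use futures::channel::oneshot;
use sp_utils::mpsc::TracingUnboundedSender;

enum ExampleRequest {
    // The background task sends its answer back through this channel.
    PeerCount(oneshot::Sender<usize>),
}

fn ask_peer_count(
    send_back: &TracingUnboundedSender<ExampleRequest>,
) -> oneshot::Receiver<usize> {
    let (tx, rx) = oneshot::channel();
    // Ignore the error case (the worker side has gone away) for brevity.
    let _ = send_back.unbounded_send(ExampleRequest::PeerCount(tx));
    rx
}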
pub fn new( info: SystemInfo, - send_back: mpsc::UnboundedSender>, + send_back: TracingUnboundedSender>, ) -> Self { System { info, @@ -81,7 +82,11 @@ impl SystemApi::Number> for Sy Ok(self.info.chain_name.clone()) } - fn system_properties(&self) -> Result { + fn system_type(&self) -> Result { + Ok(self.info.chain_type.clone()) + } + + fn system_properties(&self) -> Result { Ok(self.info.properties.clone()) } diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 4487566e44cfd9fc18b41112d9ec66555c728196..f0331f50edff77cd8a2a2b837ff0c6499dee4cf5 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -17,7 +17,7 @@ use super::*; use sc_network::{self, PeerId}; -use sc_network::config::Roles; +use sc_network::config::Role; use substrate_test_runtime_client::runtime::Block; use assert_matches::assert_matches; use futures::{prelude::*, channel::mpsc}; @@ -60,7 +60,7 @@ fn api>>(sync: T) -> System { for _peer in 0..status.peers { peers.push(PeerInfo { peer_id: status.peer_id.to_base58(), - roles: format!("{:?}", Roles::FULL), + roles: format!("{}", Role::Full), protocol_version: 1, best_hash: Default::default(), best_number: 1, @@ -105,6 +105,7 @@ fn api>>(sync: T) -> System { impl_version: "0.2.0".into(), chain_name: "testchain".into(), properties: Default::default(), + chain_type: Default::default(), }, tx) } @@ -117,7 +118,7 @@ fn wait_receiver(rx: Receiver) -> T { fn system_name_works() { assert_eq!( api(None).system_name().unwrap(), - "testclient".to_owned() + "testclient".to_owned(), ); } @@ -125,7 +126,7 @@ fn system_name_works() { fn system_version_works() { assert_eq!( api(None).system_version().unwrap(), - "0.2.0".to_owned() + "0.2.0".to_owned(), ); } @@ -133,7 +134,7 @@ fn system_version_works() { fn system_chain_works() { assert_eq!( api(None).system_chain().unwrap(), - "testchain".to_owned() + "testchain".to_owned(), ); } @@ -141,7 +142,15 @@ fn system_chain_works() { fn system_properties_works() { assert_eq!( api(None).system_properties().unwrap(), - serde_json::map::Map::new() + serde_json::map::Map::new(), + ); +} + +#[test] +fn system_type_works() { + assert_eq!( + api(None).system_type().unwrap(), + Default::default(), ); } diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index ba376ed329b9dc4fb72594077012bed3feede30b..634d774a313d20593485a80a2c780911c2aea501 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-service" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -20,7 +20,7 @@ wasmtime = [ [dependencies] derive_more = "0.99.2" futures01 = { package = "futures", version = "0.1.29" } -futures = "0.3.1" +futures = "0.3.4" futures-diagnose = "1.0" parking_lot = "0.10.0" lazy_static = "1.4.0" @@ -31,38 +31,49 @@ wasm-timer = "0.2" exit-future = "0.2.0" serde = "1.0.101" serde_json = "1.0.41" -sysinfo = "0.11.7" -target_info = "0.1.0" -sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } -sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-session = { version = "2.0.0-alpha.2", path = "../../primitives/session" } -sp-application-crypto = { version = "2.0.0-alpha.2", path = 
"../../primitives/application-crypto" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } -sc-network = { version = "0.8.0-alpha.2", path = "../network" } -sc-chain-spec = { version = "2.0.0-alpha.2", path = "../chain-spec" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sc-client = { version = "0.8.0-alpha.2", path = "../" } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } -sc-client-db = { version = "0.8.0-alpha.2", path = "../db" } -codec = { package = "parity-scale-codec", version = "1.2.0" } -sc-executor = { version = "0.8.0-alpha.2", path = "../executor" } -sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../transaction-pool" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } -sc-rpc-server = { version = "2.0.0-alpha.2", path = "../rpc-servers" } -sc-rpc = { version = "2.0.0-alpha.2", path = "../rpc" } -sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } -sc-offchain = { version = "2.0.0-alpha.2", path = "../offchain" } +sysinfo = "0.12.0" +sc-keystore = { version = "2.0.0-alpha.5", path = "../keystore" } +sp-io = { version = "2.0.0-alpha.5", path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-session = { version = "2.0.0-alpha.5", path = "../../primitives/session" } +sp-application-crypto = { version = "2.0.0-alpha.5", path = "../../primitives/application-crypto" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/common" } +sc-network = { version = "0.8.0-alpha.5", path = "../network" } +sc-chain-spec = { version = "2.0.0-alpha.5", path = "../chain-spec" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sc-client = { version = "0.8.0-alpha.5", path = "../" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } +sc-client-db = { version = "0.8.0-alpha.5", path = "../db" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +sc-executor = { version = "0.8.0-alpha.5", path = "../executor" } +sc-transaction-pool = { version = "2.0.0-alpha.5", path = "../transaction-pool" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../primitives/transaction-pool" } +sc-rpc-server = { version = "2.0.0-alpha.5", path = "../rpc-servers" } +sc-rpc = { version = "2.0.0-alpha.5", path = "../rpc" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../telemetry" } +sc-offchain = { version = "2.0.0-alpha.5", path = "../offchain" } parity-multiaddr = { package = "parity-multiaddr", version = "0.7.3" } -prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" , version = "0.8.0-alpha.2"} -sc-tracing = { version = "2.0.0-alpha.2", path = "../tracing" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" , version = "0.8.0-alpha.5"} +sc-tracing = { version = "2.0.0-alpha.5", path = "../tracing" } tracing = "0.1.10" -parity-util-mem = { version = "0.5.2", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.6.0", default-features = false, features = ["primitive-types"] } + + +[target.'cfg(any(unix, windows))'.dependencies] +netstat2 = "0.8.1" + 
+[target.'cfg(target_os = "linux")'.dependencies] +procfs = '0.7.8' + [dev-dependencies] substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/babe" } -grandpa = { version = "0.8.0-alpha.2", package = "sc-finality-grandpa", path = "../finality-grandpa" } -grandpa-primitives = { version = "2.0.0-alpha.2", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } +sp-consensus-babe = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/babe" } +grandpa = { version = "0.8.0-alpha.5", package = "sc-finality-grandpa", path = "../finality-grandpa" } +grandpa-primitives = { version = "2.0.0-alpha.5", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 7237a55377085840937f65b8163727579668fd49..0eefbe730f83bf9496fb02ff9bd271276ed673fb 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -18,6 +18,7 @@ use crate::{Service, NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL use crate::{TaskManagerBuilder, start_rpc_servers, build_network_future, TransactionPoolAdapter}; use crate::status_sinks; use crate::config::{Configuration, DatabaseConfig, KeystoreConfig, PrometheusConfig}; +use crate::metrics::MetricsService; use sc_client_api::{ self, BlockchainEvents, @@ -25,22 +26,22 @@ use sc_client_api::{ execution_extensions::ExtensionsFactory, ExecutorProvider, CallExecutor }; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sc_client::Client; use sc_chain_spec::get_extension; use sp_consensus::import_queue::ImportQueue; use futures::{ Future, FutureExt, StreamExt, - channel::mpsc, future::ready, }; use sc_keystore::{Store as Keystore}; use log::{info, warn, error}; -use sc_network::config::{FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; +use sc_network::config::{Role, FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; use sc_network::{NetworkService, NetworkStateInfo}; use parking_lot::{Mutex, RwLock}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, NumberFor, SaturatedConversion, HashFor, UniqueSaturatedInto, + Block as BlockT, NumberFor, SaturatedConversion, HashFor, }; use sp_api::ProvideRuntimeApi; use sc_executor::{NativeExecutor, NativeExecutionDispatch}; @@ -49,56 +50,9 @@ use std::{ marker::PhantomData, sync::Arc, pin::Pin }; use wasm_timer::SystemTime; -use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; use sp_blockchain; -use prometheus_endpoint::{register, Gauge, U64, F64, Registry, PrometheusError, Opts, GaugeVec}; - -struct ServiceMetrics { - block_height_number: GaugeVec, - ready_transactions_number: Gauge, - memory_usage_bytes: Gauge, - cpu_usage_percentage: Gauge, - network_per_sec_bytes: GaugeVec, - database_cache: Gauge, - state_cache: Gauge, - state_db: GaugeVec, -} - -impl ServiceMetrics { - fn register(registry: &Registry) -> Result { - Ok(Self { - block_height_number: register(GaugeVec::new( - Opts::new("block_height_number", "Height of the chain"), - &["status"] - )?, registry)?, - ready_transactions_number: register(Gauge::new( - "ready_transactions_number", "Number of transactions in the ready queue", 
- )?, registry)?, - memory_usage_bytes: register(Gauge::new( - "memory_usage_bytes", "Node memory usage", - )?, registry)?, - cpu_usage_percentage: register(Gauge::new( - "cpu_usage_percentage", "Node CPU usage", - )?, registry)?, - network_per_sec_bytes: register(GaugeVec::new( - Opts::new("network_per_sec_bytes", "Networking bytes per second"), - &["direction"] - )?, registry)?, - database_cache: register(Gauge::new( - "database_cache_bytes", "RocksDB cache size in bytes", - )?, registry)?, - state_cache: register(Gauge::new( - "state_cache_bytes", "State cache size in bytes", - )?, registry)?, - state_db: register(GaugeVec::new( - Opts::new("state_db_cache_bytes", "State DB cache in bytes"), - &["subtype"] - )?, registry)?, - }) - } -} pub type BackgroundTask = Pin + Send>>; @@ -214,7 +168,6 @@ fn new_full_parts( password.clone() )?, KeystoreConfig::InMemory => Keystore::new_in_memory(), - KeystoreConfig::None => return Err("No keystore config provided!".into()), }; let tasks_builder = TaskManagerBuilder::new(); @@ -225,7 +178,7 @@ fn new_full_parts( config.max_runtime_instances, ); - let chain_spec = config.expect_chain_spec(); + let chain_spec = &config.chain_spec; let fork_blocks = get_extension::>(chain_spec.extensions()) .cloned() .unwrap_or_default(); @@ -240,11 +193,11 @@ fn new_full_parts( state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), pruning: config.pruning.clone(), - source: match config.expect_database() { + source: match &config.database { DatabaseConfig::Path { path, cache_size } => sc_client_db::DatabaseSettingsSrc::Path { path: path.clone(), - cache_size: cache_size.clone().map(|u| u as usize), + cache_size: *cache_size, }, DatabaseConfig::Custom(db) => sc_client_db::DatabaseSettingsSrc::Custom(db.clone()), @@ -259,10 +212,11 @@ fn new_full_parts( sc_client_db::new_client( db_config, executor, - config.expect_chain_spec().as_storage_builder(), + chain_spec.as_storage_builder(), fork_blocks, bad_blocks, extensions, + Box::new(tasks_builder.spawn_handle()), config.prometheus_config.as_ref().map(|config| config.registry.clone()), )? 
}; @@ -334,7 +288,6 @@ impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { password.clone() )?, KeystoreConfig::InMemory => Keystore::new_in_memory(), - KeystoreConfig::None => return Err("No keystore config provided!".into()), }; let executor = NativeExecutor::::new( @@ -349,11 +302,11 @@ impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), pruning: config.pruning.clone(), - source: match config.expect_database() { + source: match &config.database { DatabaseConfig::Path { path, cache_size } => sc_client_db::DatabaseSettingsSrc::Path { path: path.clone(), - cache_size: cache_size.clone().map(|u| u as usize), + cache_size: *cache_size, }, DatabaseConfig::Custom(db) => sc_client_db::DatabaseSettingsSrc::Custom(db.clone()), @@ -366,6 +319,7 @@ impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { sc_client::light::new_fetch_checker::<_, TBl, _>( light_blockchain.clone(), executor.clone(), + Box::new(tasks_builder.spawn_handle()), ), ); let fetcher = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); @@ -373,8 +327,9 @@ impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { let remote_blockchain = backend.remote_blockchain(); let client = Arc::new(sc_client::light::new_light( backend.clone(), - config.expect_chain_spec().as_storage_builder(), + config.chain_spec.as_storage_builder(), executor, + Box::new(tasks_builder.spawn_handle()), config.prometheus_config.as_ref().map(|config| config.registry.clone()), )?); @@ -817,14 +772,14 @@ ServiceBuilder< )?; // A side-channel for essential tasks to communicate shutdown. - let (essential_failed_tx, essential_failed_rx) = mpsc::unbounded(); + let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks"); let import_queue = Box::new(import_queue); let chain_info = client.chain_info(); - let chain_spec = config.expect_chain_spec(); + let chain_spec = &config.chain_spec; - let version = config.full_version(); - info!("Highest known block at #{}", chain_info.best_number); + let version = config.impl_version; + info!("📦 Highest known block at #{}", chain_info.best_number); telemetry!( SUBSTRATE_INFO; "node.start"; @@ -837,7 +792,7 @@ ServiceBuilder< .register_transaction_pool(Arc::downgrade(&transaction_pool) as _); let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { - imports_external_transactions: !config.roles.is_light(), + imports_external_transactions: !matches!(config.role, Role::Light), pool: transaction_pool.clone(), client: client.clone(), executor: tasks_builder.spawn_handle(), @@ -860,7 +815,7 @@ ServiceBuilder< Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator::new(client.clone())); let network_params = sc_network::config::Params { - roles: config.roles, + role: config.role.clone(), executor: { let spawn_handle = tasks_builder.spawn_handle(); Some(Box::new(move |fut| { @@ -910,7 +865,7 @@ ServiceBuilder< let offchain = offchain_workers.as_ref().map(Arc::downgrade); let notifications_spawn_handle = tasks_builder.spawn_handle(); let network_state_info: Arc = network.clone(); - let is_validator = config.roles.is_authority(); + let is_validator = config.role.is_authority(); let (import_stream, finality_stream) = ( client.import_notification_stream().map(|n| ChainEvent::NewBlock { @@ -989,114 +944,44 @@ ServiceBuilder< } // Prometheus metrics. 
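The hunk that follows replaces the inline gauge registration with the new MetricsService and flattens the node role into a single integer for the node_roles gauge. A self-contained sketch of just that mapping, mirroring the values visible in the diff; the enum below is a reduced stand-in, not the real sc-network Role type:

// Reduced stand-in for the Role enum; only the Authority variant's
// sentry_nodes field is taken from the diff, everything else is trimmed.
#[allow(dead_code)]
enum Role {
    Full,
    Light,
    Sentry,
    Authority { sentry_nodes: Vec<String> },
}

// Mirrors the role_bits mapping introduced in the hunk below:
// Full = 1, Light = 2, Sentry = 3, Authority = 4.
fn role_bits(role: &Role) -> u64 {
    match role {
        Role::Full => 1,
        Role::Light => 2,
        Role::Sentry => 3,
        Role::Authority { .. } => 4,
    }
}

fn main() {
    let role = Role::Authority { sentry_nodes: Vec::new() };
    assert_eq!(role_bits(&role), 4);
    println!("node_roles gauge value: {}", role_bits(&role));
}
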
- let metrics = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { + let mut metrics_service = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { // Set static metrics. - register(Gauge::::with_opts( - Opts::new( - "build_info", - "A metric with a constant '1' value labeled by name, version, and commit." - ) - .const_label("name", config.impl_name) - .const_label("version", config.impl_version) - .const_label("commit", config.impl_commit), - )?, ®istry)?.set(1); - register(Gauge::::new( - "node_roles", "The roles the node is running as", - )?, ®istry)?.set(u64::from(config.roles.bits())); - - let metrics = ServiceMetrics::register(®istry)?; + + let role_bits = match config.role { + Role::Full => 1u64, + Role::Light => 2u64, + Role::Sentry { .. } => 3u64, + Role::Authority { .. } => 4u64, + }; + let metrics = MetricsService::with_prometheus( + ®istry, + &config.network.node_name, + &config.impl_version, + role_bits, + )?; spawn_handle.spawn( "prometheus-endpoint", prometheus_endpoint::init_prometheus(port, registry).map(drop) ); - Some(metrics) + metrics } else { - None + MetricsService::new() }; // Periodically notify the telemetry. let transaction_pool_ = transaction_pool.clone(); let client_ = client.clone(); - let mut sys = System::new(); - let self_pid = get_current_pid().ok(); - let (state_tx, state_rx) = mpsc::unbounded::<(NetworkStatus<_>, NetworkState)>(); + let (state_tx, state_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat1"); network_status_sinks.lock().push(std::time::Duration::from_millis(5000), state_tx); let tel_task = state_rx.for_each(move |(net_status, _)| { let info = client_.usage_info(); - let best_number = info.chain.best_number.saturated_into::(); - let best_hash = info.chain.best_hash; - let num_peers = net_status.num_connected_peers; - let txpool_status = transaction_pool_.status(); - let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); - let bandwidth_download = net_status.average_download_per_sec; - let bandwidth_upload = net_status.average_upload_per_sec; - let best_seen_block = net_status.best_seen_block - .map(|num: NumberFor| num.unique_saturated_into() as u64); - - // get cpu usage and memory usage of this process - let (cpu_usage, memory) = if let Some(self_pid) = self_pid { - if sys.refresh_process(self_pid) { - let proc = sys.get_process(self_pid) - .expect("Above refresh_process succeeds, this should be Some(), qed"); - (proc.cpu_usage(), proc.memory()) - } else { (0.0, 0) } - } else { (0.0, 0) }; - - telemetry!( - SUBSTRATE_INFO; - "system.interval"; - "peers" => num_peers, - "height" => best_number, - "best" => ?best_hash, - "txcount" => txpool_status.ready, - "cpu" => cpu_usage, - "memory" => memory, - "finalized_height" => finalized_number, - "finalized_hash" => ?info.chain.finalized_hash, - "bandwidth_download" => bandwidth_download, - "bandwidth_upload" => bandwidth_upload, - "used_state_cache_size" => info.usage.as_ref() - .map(|usage| usage.memory.state_cache.as_bytes()) - .unwrap_or(0), - "used_db_cache_size" => info.usage.as_ref() - .map(|usage| usage.memory.database_cache.as_bytes()) - .unwrap_or(0), - "disk_read_per_sec" => info.usage.as_ref() - .map(|usage| usage.io.bytes_read) - .unwrap_or(0), - "disk_write_per_sec" => info.usage.as_ref() - .map(|usage| usage.io.bytes_written) - .unwrap_or(0), + metrics_service.tick( + &info, + &transaction_pool_.status(), + &net_status, ); - if let Some(metrics) = metrics.as_ref() { - 
metrics.memory_usage_bytes.set(memory); - metrics.cpu_usage_percentage.set(f64::from(cpu_usage)); - metrics.ready_transactions_number.set(txpool_status.ready as u64); - - metrics.network_per_sec_bytes.with_label_values(&["download"]).set(net_status.average_download_per_sec); - metrics.network_per_sec_bytes.with_label_values(&["upload"]).set(net_status.average_upload_per_sec); - - metrics.block_height_number.with_label_values(&["finalized"]).set(finalized_number); - metrics.block_height_number.with_label_values(&["best"]).set(best_number); - - if let Some(best_seen_block) = best_seen_block { - metrics.block_height_number.with_label_values(&["sync_target"]).set(best_seen_block); - } - - if let Some(info) = info.usage.as_ref() { - metrics.database_cache.set(info.memory.database_cache.as_bytes() as u64); - metrics.state_cache.set(info.memory.state_cache.as_bytes() as u64); - - metrics.state_db.with_label_values(&["non_canonical"]).set(info.memory.state_db.non_canonical.as_bytes() as u64); - if let Some(pruning) = info.memory.state_db.pruning { - metrics.state_db.with_label_values(&["pruning"]).set(pruning.as_bytes() as u64); - } - metrics.state_db.with_label_values(&["pinned"]).set(info.memory.state_db.pinned.as_bytes() as u64); - } - } - ready(()) }); @@ -1106,7 +991,7 @@ ServiceBuilder< ); // Periodically send the network state to the telemetry. - let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus<_>, NetworkState)>(); + let (netstat_tx, netstat_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat2"); network_status_sinks.lock().push(std::time::Duration::from_secs(30), netstat_tx); let tel_task_2 = netstat_rx.for_each(move |(_, network_state)| { telemetry!( @@ -1122,7 +1007,7 @@ ServiceBuilder< ); // RPC - let (system_rpc_tx, system_rpc_rx) = mpsc::unbounded(); + let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); let gen_handler = || { use sc_rpc::{chain, state, author, system, offchain}; @@ -1131,6 +1016,7 @@ ServiceBuilder< impl_name: config.impl_name.into(), impl_version: config.impl_version.into(), properties: chain_spec.properties().clone(), + chain_type: chain_spec.chain_type().clone(), }; let subscriptions = sc_rpc::Subscriptions::new(Arc::new(tasks_builder.spawn_handle())); @@ -1194,25 +1080,26 @@ ServiceBuilder< spawn_handle.spawn( "network-worker", build_network_future( - config.roles, + config.role.clone(), network_mut, client.clone(), network_status_sinks.clone(), system_rpc_rx, has_bootnodes, + config.announce_block, ), ); - let telemetry_connection_sinks: Arc>>> = Default::default(); + let telemetry_connection_sinks: Arc>>> = Default::default(); // Telemetry let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { - let is_authority = config.roles.is_authority(); + let is_authority = config.role.is_authority(); let network_id = network.local_peer_id().to_base58(); - let name = config.name.clone(); + let name = config.network.node_name.clone(); let impl_name = config.impl_name.to_owned(); let version = version.clone(); - let chain_name = config.expect_chain_spec().name().to_owned(); + let chain_name = config.chain_spec.name().to_owned(); let telemetry_connection_sinks_ = telemetry_connection_sinks.clone(); let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { endpoints, @@ -1264,7 +1151,7 @@ ServiceBuilder< Ok(Service { client, - task_manager: tasks_builder.into_task_manager(config.task_executor.ok_or(Error::TaskExecutorRequired)?), + task_manager: 
tasks_builder.into_task_manager(config.task_executor), network, network_status_sinks, select_chain, diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs index c58a2755ddc51eeb4e346a5f3c993edbfd5cf578..12fae3224108a66feed5e593e8de50bb97c8a9cf 100644 --- a/client/service/src/chain_ops.rs +++ b/client/service/src/chain_ops.rs @@ -125,7 +125,7 @@ impl< return std::task::Poll::Ready(Err(From::from(err))); }, }; - info!("Importing {} blocks", c); + info!("📦 Importing {} blocks", c); count = Some(c); c } @@ -185,7 +185,7 @@ impl< } if link.imported_blocks >= count { - info!("Imported {} blocks. Best: #{}", read_block_count, client.chain_info().best_number); + info!("🎉 Imported {} blocks. Best: #{}", read_block_count, client.chain_info().best_number); return std::task::Poll::Ready(Ok(())); } else { diff --git a/client/service/src/config.rs b/client/service/src/config.rs index d9d497d1e999f219b95983f65f272137d94d7031..0515a31c7c656aed89386deaa3e9f5335c9cf729 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -18,60 +18,35 @@ pub use sc_client::ExecutionStrategies; pub use sc_client_db::{kvdb::KeyValueDB, PruningMode}; -pub use sc_network::config::{ExtTransport, NetworkConfiguration, Roles}; +pub use sc_network::Multiaddr; +pub use sc_network::config::{ExtTransport, MultiaddrWithPeerId, NetworkConfiguration, Role, NodeKeyConfig}; pub use sc_executor::WasmExecutionMethod; use std::{future::Future, path::{PathBuf, Path}, pin::Pin, net::SocketAddr, sync::Arc}; pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; use sc_chain_spec::ChainSpec; use sp_core::crypto::Protected; -use target_info::Target; -use sc_telemetry::TelemetryEndpoints; +pub use sc_telemetry::TelemetryEndpoints; use prometheus_endpoint::Registry; -/// Executable version. Used to pass version information from the root crate. -#[derive(Clone)] -pub struct VersionInfo { - /// Implementation name. - pub name: &'static str, - /// Implementation version. - pub version: &'static str, - /// SCM Commit hash. - pub commit: &'static str, - /// Executable file name. - pub executable_name: &'static str, - /// Executable file description. - pub description: &'static str, - /// Executable file author. - pub author: &'static str, - /// Support URL. - pub support_url: &'static str, - /// Copyright starting year (x-current year) - pub copyright_start_year: i32, -} - /// Service configuration. pub struct Configuration { /// Implementation name pub impl_name: &'static str, - /// Implementation version + /// Implementation version (see sc-cli to see an example of format) pub impl_version: &'static str, - /// Git commit if any. - pub impl_commit: &'static str, - /// Node roles. - pub roles: Roles, + /// Node role. + pub role: Role, /// How to spawn background tasks. Mandatory, otherwise creating a `Service` will error. - pub task_executor: Option + Send>>) + Send + Sync>>, + pub task_executor: Arc + Send>>) + Send + Sync>, /// Extrinsic pool configuration. pub transaction_pool: TransactionPoolOptions, /// Network configuration. pub network: NetworkConfiguration, - /// Path to the base configuration directory. - pub config_dir: Option, /// Configuration for the keystore. pub keystore: KeystoreConfig, /// Configuration for the database. - pub database: Option, + pub database: DatabaseConfig, /// Size of internal state cache in Bytes pub state_cache_size: usize, /// Size in percent of cache size dedicated to child tries @@ -79,9 +54,7 @@ pub struct Configuration { /// Pruning settings. 
pub pruning: PruningMode, /// Chain configuration. - pub chain_spec: Option>, - /// Node name. - pub name: String, + pub chain_spec: Box, /// Wasm execution method. pub wasm_method: WasmExecutionMethod, /// Execution strategies. @@ -105,10 +78,6 @@ pub struct Configuration { pub default_heap_pages: Option, /// Should offchain workers be executed. pub offchain_worker: bool, - /// Sentry mode is enabled, the node's role is AUTHORITY but it should not - /// actively participate in consensus (i.e. no keystores should be passed to - /// consensus modules). - pub sentry_mode: bool, /// Enable authoring even when offline. pub force_authoring: bool, /// Disable GRANDPA when running in validator mode @@ -127,13 +96,13 @@ pub struct Configuration { /// /// The default value is 8. pub max_runtime_instances: usize, + /// Announce block automatically after they have been imported + pub announce_block: bool, } /// Configuration of the client keystore. #[derive(Clone)] pub enum KeystoreConfig { - /// No config supplied. - None, /// Keystore at a path on-disk. Recommended for native nodes. Path { /// The path of the keystore. @@ -149,8 +118,8 @@ impl KeystoreConfig { /// Returns the path for the keystore. pub fn path(&self) -> Option<&Path> { match self { - Self::Path { path, .. } => Some(&path), - Self::None | Self::InMemory => None, + Self::Path { path, .. } => Some(path), + Self::InMemory => None, } } } @@ -163,7 +132,7 @@ pub enum DatabaseConfig { /// Path to the database. path: PathBuf, /// Cache Size for internal database in MiB - cache_size: Option, + cache_size: usize, }, /// A custom implementation of an already-open database. @@ -192,129 +161,9 @@ impl PrometheusConfig { } } -impl Default for Configuration { - /// Create a default config - fn default() -> Self { - Configuration { - impl_name: "parity-substrate", - impl_version: "0.0.0", - impl_commit: "", - chain_spec: None, - config_dir: None, - name: Default::default(), - roles: Roles::FULL, - task_executor: None, - transaction_pool: Default::default(), - network: Default::default(), - keystore: KeystoreConfig::None, - database: None, - state_cache_size: Default::default(), - state_cache_child_ratio: Default::default(), - pruning: PruningMode::default(), - wasm_method: WasmExecutionMethod::Interpreted, - execution_strategies: Default::default(), - rpc_http: None, - rpc_ws: None, - rpc_ws_max_connections: None, - rpc_cors: Some(vec![]), - prometheus_config: None, - telemetry_endpoints: None, - telemetry_external_transport: None, - default_heap_pages: None, - offchain_worker: Default::default(), - sentry_mode: false, - force_authoring: false, - disable_grandpa: false, - dev_key_seed: None, - tracing_targets: Default::default(), - tracing_receiver: Default::default(), - max_runtime_instances: 8, - } - } -} - impl Configuration { - /// Create a default config using `VersionInfo` - pub fn from_version(version: &VersionInfo) -> Self { - let mut config = Configuration::default(); - config.impl_name = version.name; - config.impl_version = version.version; - config.impl_commit = version.commit; - - config - } - - /// Returns full version string of this configuration. - pub fn full_version(&self) -> String { - full_version_from_strs(self.impl_version, self.impl_commit) - } - - /// Implementation id and version. 
- pub fn client_id(&self) -> String { - format!("{}/v{}", self.impl_name, self.full_version()) - } - - /// Generate a PathBuf to sub in the chain configuration directory - /// if given - pub fn in_chain_config_dir(&self, sub: &str) -> Option { - self.config_dir.clone().map(|mut path| { - path.push("chains"); - path.push(self.expect_chain_spec().id()); - path.push(sub); - path - }) - } - - /// Return a reference to the `ChainSpec` of this `Configuration`. - /// - /// ### Panics - /// - /// This method panic if the `chain_spec` is `None` - pub fn expect_chain_spec(&self) -> &dyn ChainSpec { - &**self.chain_spec.as_ref().expect("chain_spec must be specified") - } - - /// Return a reference to the `DatabaseConfig` of this `Configuration`. - /// - /// ### Panics - /// - /// This method panic if the `database` is `None` - pub fn expect_database(&self) -> &DatabaseConfig { - self.database.as_ref().expect("database must be specified") - } - - /// Returns a string displaying the node role, special casing the sentry mode - /// (returning `SENTRY`), since the node technically has an `AUTHORITY` role but - /// doesn't participate. + /// Returns a string displaying the node role. pub fn display_role(&self) -> String { - if self.sentry_mode { - "SENTRY".to_string() - } else { - self.roles.to_string() - } + self.role.to_string() } - - /// Use in memory keystore config when it is not required at all. - /// - /// This function returns an error if the keystore is already set to something different than - /// `KeystoreConfig::None`. - pub fn use_in_memory_keystore(&mut self) -> Result<(), String> { - match &mut self.keystore { - cfg @ KeystoreConfig::None => { *cfg = KeystoreConfig::InMemory; Ok(()) }, - _ => Err("Keystore config specified when it should not be!".into()), - } - } -} - -/// Returns platform info -pub fn platform() -> String { - let env = Target::env(); - let env_dash = if env.is_empty() { "" } else { "-" }; - format!("{}-{}{}{}", Target::arch(), Target::os(), env_dash, env) -} - -/// Returns full version string, using supplied version and commit. 
-pub fn full_version_from_strs(impl_version: &str, impl_commit: &str) -> String { - let commit_dash = if impl_commit.is_empty() { "" } else { "-" }; - format!("{}{}{}-{}", impl_version, commit_dash, impl_commit, platform()) } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 0a7d5ff103f3cf2c28abd7577e4a06ff5e8c797d..d5db64ea4681e719049e39729d7532232f9e33b7 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -24,6 +24,7 @@ pub mod config; pub mod chain_ops; pub mod error; +mod metrics; mod builder; mod status_sinks; mod task_manager; @@ -40,7 +41,6 @@ use parking_lot::Mutex; use sc_client::Client; use futures::{ Future, FutureExt, Stream, StreamExt, - channel::mpsc, compat::*, sink::SinkExt, task::{Spawn, FutureObj, SpawnError}, @@ -51,6 +51,7 @@ use codec::{Encode, Decode}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{NumberFor, Block as BlockT}; use parity_util_mem::MallocSizeOf; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; pub use self::error::Error; pub use self::builder::{ @@ -58,9 +59,10 @@ pub use self::builder::{ ServiceBuilder, ServiceBuilderCommand, TFullClient, TLightClient, TFullBackend, TLightBackend, TFullCallExecutor, TLightCallExecutor, }; -pub use config::{Configuration, Roles, PruningMode}; +pub use config::{Configuration, Role, PruningMode, DatabaseConfig}; pub use sc_chain_spec::{ - ChainSpec, GenericChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension + ChainSpec, GenericChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension, + NoExtension, ChainType, }; pub use sp_transaction_pool::{TransactionPool, InPoolTransaction, error::IntoPoolError}; pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; @@ -71,6 +73,7 @@ pub use sc_executor::NativeExecutionDispatch; pub use std::{ops::Deref, result::Result, sync::Arc}; #[doc(hidden)] pub use sc_network::config::{FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; +pub use sc_tracing::TracingReceiver; pub use task_manager::{TaskManagerBuilder, SpawnTaskHandle}; use task_manager::TaskManager; @@ -98,13 +101,13 @@ pub struct Service { transaction_pool: Arc, /// Send a signal when a spawned essential task has concluded. The next time /// the service future is polled it should complete with an error. - essential_failed_tx: mpsc::UnboundedSender<()>, + essential_failed_tx: TracingUnboundedSender<()>, /// A receiver for spawned essential-tasks concluding. - essential_failed_rx: mpsc::UnboundedReceiver<()>, + essential_failed_rx: TracingUnboundedReceiver<()>, rpc_handlers: sc_rpc_server::RpcHandler, _rpc: Box, _telemetry: Option, - _telemetry_on_connect_sinks: Arc>>>, + _telemetry_on_connect_sinks: Arc>>>, _offchain_workers: Option>, keystore: sc_keystore::KeyStorePtr, marker: PhantomData, @@ -130,7 +133,7 @@ pub trait AbstractService: 'static + Future> + type TransactionPool: TransactionPool + MallocSizeOfWasm; /// Get event stream for telemetry connection established events. - fn telemetry_on_connect_stream(&self) -> futures::channel::mpsc::UnboundedReceiver<()>; + fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()>; /// return a shared instance of Telemetry (if enabled) fn telemetry(&self) -> Option; @@ -171,7 +174,7 @@ pub trait AbstractService: 'static + Future> + -> Arc::Hash>>; /// Returns a receiver that periodically receives a status of the network. 
- fn network_status(&self, interval: Duration) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)>; + fn network_status(&self, interval: Duration) -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)>; /// Get shared transaction pool instance. fn transaction_pool(&self) -> Arc; @@ -203,8 +206,8 @@ where type SelectChain = TSc; type TransactionPool = TExPool; - fn telemetry_on_connect_stream(&self) -> futures::channel::mpsc::UnboundedReceiver<()> { - let (sink, stream) = futures::channel::mpsc::unbounded(); + fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()> { + let (sink, stream) = tracing_unbounded("mpsc_telemetry_on_connect"); self._telemetry_on_connect_sinks.lock().push(sink); stream } @@ -259,8 +262,8 @@ where self.network.clone() } - fn network_status(&self, interval: Duration) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)> { - let (sink, stream) = mpsc::unbounded(); + fn network_status(&self, interval: Duration) -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)> { + let (sink, stream) = tracing_unbounded("mpsc_network_status"); self.network_status_sinks.lock().push(interval, sink); stream } @@ -322,12 +325,13 @@ fn build_network_future< C: sc_client::BlockchainEvents, H: sc_network::ExHashT > ( - roles: Roles, + role: Role, mut network: sc_network::NetworkWorker, client: Arc, status_sinks: Arc, NetworkState)>>>, - mut rpc_rx: mpsc::UnboundedReceiver>, + mut rpc_rx: TracingUnboundedReceiver>, should_have_peers: bool, + announce_imported_blocks: bool, ) -> impl Future { let mut imported_blocks_stream = client.import_notification_stream().fuse(); let mut finality_notification_stream = client.finality_notification_stream().fuse(); @@ -337,7 +341,11 @@ fn build_network_future< // We poll `imported_blocks_stream`. while let Poll::Ready(Some(notification)) = Pin::new(&mut imported_blocks_stream).poll_next(cx) { - network.on_block_imported(notification.header, Vec::new(), notification.is_new_best); + network.on_block_imported(notification.header, notification.is_new_best); + + if announce_imported_blocks { + network.service().announce_block(notification.hash, Vec::new()); + } } // We poll `finality_notification_stream`, but we only take the last event. @@ -394,17 +402,14 @@ fn build_network_future< sc_rpc::system::Request::NodeRoles(sender) => { use sc_rpc::system::NodeRole; - let node_roles = (0 .. 8) - .filter(|&bit_number| (roles.bits() >> bit_number) & 1 == 1) - .map(|bit_number| match Roles::from_bits(1 << bit_number) { - Some(Roles::AUTHORITY) => NodeRole::Authority, - Some(Roles::LIGHT) => NodeRole::LightClient, - Some(Roles::FULL) => NodeRole::Full, - _ => NodeRole::UnknownRole(bit_number), - }) - .collect(); - - let _ = sender.send(node_roles); + let node_role = match role { + Role::Authority { .. } => NodeRole::Authority, + Role::Light => NodeRole::LightClient, + Role::Full => NodeRole::Full, + Role::Sentry { .. 
} => NodeRole::Sentry, + }; + + let _ = sender.send(vec![node_role]); } }; } @@ -436,7 +441,7 @@ fn build_network_future< log!( target: "service", if polling_dur >= Duration::from_secs(1) { Level::Warn } else { Level::Trace }, - "Polling the network future took {:?}", + "⚠️ Polling the network future took {:?}", polling_dur ); @@ -621,7 +626,8 @@ where match Decode::decode(&mut &encoded[..]) { Ok(uxt) => { let best_block_id = BlockId::hash(self.client.info().best_hash); - let import_future = self.pool.submit_one(&best_block_id, uxt); + let source = sp_transaction_pool::TransactionSource::External; + let import_future = self.pool.submit_one(&best_block_id, source, uxt); let import_future = import_future .map(move |import_result| { match import_result { @@ -648,7 +654,11 @@ where } fn transaction(&self, hash: &H) -> Option { - self.pool.ready_transaction(hash).map(|tx| tx.data().clone()) + self.pool.ready_transaction(hash) + .and_then( + // Only propagable transactions should be resolved for network service. + |tx| if tx.is_propagable() { Some(tx.data().clone()) } else { None } + ) } } @@ -670,6 +680,7 @@ mod tests { Default::default(), Arc::new(FullChainApi::new(client.clone())), ).0); + let source = sp_runtime::transaction_validity::TransactionSource::External; let best = longest_chain.best_chain().unwrap(); let transaction = Transfer { amount: 5, @@ -677,8 +688,12 @@ mod tests { from: AccountKeyring::Alice.into(), to: Default::default(), }.into_signed_tx(); - block_on(pool.submit_one(&BlockId::hash(best.hash()), transaction.clone())).unwrap(); - block_on(pool.submit_one(&BlockId::hash(best.hash()), Extrinsic::IncludeData(vec![1]))).unwrap(); + block_on(pool.submit_one( + &BlockId::hash(best.hash()), source, transaction.clone()), + ).unwrap(); + block_on(pool.submit_one( + &BlockId::hash(best.hash()), source, Extrinsic::IncludeData(vec![1])), + ).unwrap(); assert_eq!(pool.status().ready, 2); // when diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs new file mode 100644 index 0000000000000000000000000000000000000000..6b7c32c2d02612239907b09b9b337d960b36e2f3 --- /dev/null +++ b/client/service/src/metrics.rs @@ -0,0 +1,427 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
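The new client/service/src/metrics.rs that begins here pulls the Prometheus plumbing out of builder.rs: gauges are registered once against the registry, then updated from a periodic tick alongside the telemetry report. A much-reduced standalone sketch of that register-once/update-on-tick shape, written against the upstream prometheus crate rather than the substrate-prometheus-endpoint wrapper the real file uses:

// Simplified sketch only: one gauge instead of the dozens below, and the
// upstream `prometheus` crate instead of substrate-prometheus-endpoint.
use prometheus::{IntGauge, Registry};

struct Metrics {
    ready_transactions: IntGauge,
}

impl Metrics {
    // Register the gauge once against a shared registry.
    fn register(registry: &Registry) -> prometheus::Result<Self> {
        let ready_transactions = IntGauge::new(
            "ready_transactions_number",
            "Number of transactions in the ready queue",
        )?;
        registry.register(Box::new(ready_transactions.clone()))?;
        Ok(Self { ready_transactions })
    }

    // Called on every tick with a fresh reading; a plain integer stands in
    // for the transaction pool status used by the real service.
    fn tick(&self, ready: i64) {
        self.ready_transactions.set(ready);
    }
}

fn main() -> prometheus::Result<()> {
    let registry = Registry::new();
    let metrics = Metrics::register(&registry)?;
    metrics.tick(42);
    Ok(())
}
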
+ +use std::convert::TryFrom; + +use crate::NetworkStatus; +use prometheus_endpoint::{register, Gauge, U64, F64, Registry, PrometheusError, Opts, GaugeVec}; +use sc_client::ClientInfo; +use sc_telemetry::{telemetry, SUBSTRATE_INFO}; +use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; +use sp_transaction_pool::PoolStatus; +use sp_utils::metrics::register_globals; + +use sysinfo::{self, ProcessExt, SystemExt}; + +#[cfg(not(target_os = "unknown"))] +use netstat2::{ + TcpState, ProtocolSocketInfo, iterate_sockets_info, AddressFamilyFlags, ProtocolFlags, +}; + +struct PrometheusMetrics { + // system + #[cfg(any(unix, windows))] + load_avg: GaugeVec, + + // process + cpu_usage_percentage: Gauge, + memory_usage_bytes: Gauge, + threads: Gauge, + open_files: GaugeVec, + + #[cfg(any(unix, windows))] + netstat: GaugeVec, + + // -- inner counters + // generic info + block_height: GaugeVec, + number_leaves: Gauge, + ready_transactions_number: Gauge, + + // I/O + network_per_sec_bytes: GaugeVec, + database_cache: Gauge, + state_cache: Gauge, + state_db: GaugeVec, +} + +impl PrometheusMetrics { + fn setup(registry: &Registry, name: &str, version: &str, roles: u64) + -> Result + { + register(Gauge::::with_opts( + Opts::new( + "build_info", + "A metric with a constant '1' value labeled by name, version" + ) + .const_label("name", name) + .const_label("version", version) + )?, ®istry)?.set(1); + + register(Gauge::::new( + "node_roles", "The roles the node is running as", + )?, ®istry)?.set(roles); + + register_globals(registry)?; + + Ok(Self { + // system + #[cfg(any(unix, windows))] + load_avg: register(GaugeVec::new( + Opts::new("load_avg", "System load average"), + &["over"] + )?, registry)?, + + // process + memory_usage_bytes: register(Gauge::new( + "memory_usage_bytes", "Node memory (resident set size) usage", + )?, registry)?, + + cpu_usage_percentage: register(Gauge::new( + "cpu_usage_percentage", "Node CPU usage", + )?, registry)?, + + #[cfg(any(unix, windows))] + netstat: register(GaugeVec::new( + Opts::new("netstat_tcp", "Current TCP connections "), + &["status"] + )?, registry)?, + + threads: register(Gauge::new( + "threads", "Number of threads used by the process", + )?, registry)?, + + open_files: register(GaugeVec::new( + Opts::new("open_file_handles", "Open file handlers held by the process"), + &["fd_type"] + )?, registry)?, + + // --- internal + + // generic internals + block_height: register(GaugeVec::new( + Opts::new("block_height", "Block height info of the chain"), + &["status"] + )?, registry)?, + + number_leaves: register(Gauge::new( + "number_leaves", "Number of known chain leaves (aka forks)", + )?, registry)?, + + ready_transactions_number: register(Gauge::new( + "ready_transactions_number", "Number of transactions in the ready queue", + )?, registry)?, + + // I/ O + network_per_sec_bytes: register(GaugeVec::new( + Opts::new("network_per_sec_bytes", "Networking bytes per second"), + &["direction"] + )?, registry)?, + database_cache: register(Gauge::new( + "database_cache_bytes", "RocksDB cache size in bytes", + )?, registry)?, + state_cache: register(Gauge::new( + "state_cache_bytes", "State cache size in bytes", + )?, registry)?, + state_db: register(GaugeVec::new( + Opts::new("state_db_cache_bytes", "State DB cache in bytes"), + &["subtype"] + )?, registry)?, + }) + } +} + +#[cfg(any(unix, windows))] +#[derive(Default)] +struct ConnectionsCount { + listen: u64, + established: u64, + starting: u64, + closing: u64, + closed: u64, + other: u64 +} 
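ConnectionsCount above and the FdCounter that follows are filled the same way: iterate over the relevant OS handles, classify each one, and fold the classifications into a Default-derived tally. A minimal illustration of that fold with a made-up state enum; the real code folds over netstat2 socket states and procfs file-descriptor targets respectively:

// Illustrative only: a tiny stand-in for the classification folds performed
// over netstat2 and procfs data further down in this file.
#[allow(dead_code)]
enum SocketState {
    Listen,
    Established,
    Closed,
}

#[derive(Default, Debug)]
struct Tally {
    listen: u64,
    established: u64,
    closed: u64,
}

fn tally(states: impl IntoIterator<Item = SocketState>) -> Tally {
    states.into_iter().fold(Tally::default(), |mut t, state| {
        match state {
            SocketState::Listen => t.listen += 1,
            SocketState::Established => t.established += 1,
            SocketState::Closed => t.closed += 1,
        }
        t
    })
}

fn main() {
    let counts = tally(vec![SocketState::Listen, SocketState::Established]);
    println!("{:?}", counts);
}
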
+ +#[derive(Default)] +struct FdCounter { + paths: u64, + sockets: u64, + net: u64, + pipes: u64, + anon_inode: u64, + mem: u64, + other: u64, +} + +#[derive(Default)] +struct ProcessInfo { + cpu_usage: f64, + memory: u64, + threads: Option, + open_fd: Option, +} + +pub struct MetricsService { + metrics: Option, + #[cfg(not(target_os = "unknown"))] + system: sysinfo::System, + pid: Option, +} + +#[cfg(target_os = "linux")] +impl MetricsService { + fn inner_new(metrics: Option) -> Self { + let process = procfs::process::Process::myself() + .expect("Procfs doesn't fail on unix. qed"); + + Self { + metrics, + system: sysinfo::System::new(), + pid: Some(process.pid), + } + } + + fn process_info(&mut self) -> ProcessInfo { + let pid = self.pid.clone().expect("unix always has a pid. qed"); + let mut info = self.process_info_for(&pid); + let process = procfs::process::Process::new(pid).expect("Our process exists. qed."); + info.threads = process.stat().ok().map(|s| + u64::try_from(s.num_threads).expect("There are no negative thread counts. qed"), + ); + info.open_fd = process.fd().ok().map(|i| + i.into_iter().fold(FdCounter::default(), |mut f, info| { + match info.target { + procfs::process::FDTarget::Path(_) => f.paths += 1, + procfs::process::FDTarget::Socket(_) => f.sockets += 1, + procfs::process::FDTarget::Net(_) => f.net += 1, + procfs::process::FDTarget::Pipe(_) => f.pipes += 1, + procfs::process::FDTarget::AnonInode(_) => f.anon_inode += 1, + procfs::process::FDTarget::MemFD(_) => f.mem += 1, + procfs::process::FDTarget::Other(_,_) => f.other += 1, + }; + f + }) + ); + info + } +} + +#[cfg(all(any(unix, windows), not(target_os = "linux")))] +impl MetricsService { + fn inner_new(metrics: Option) -> Self { + Self { + metrics, + system: sysinfo::System::new(), + pid: sysinfo::get_current_pid().ok(), + } + } + + fn process_info(&mut self) -> ProcessInfo { + self.pid.map(|pid| self.process_info_for(&pid)).unwrap_or_default() + } +} + + +#[cfg(target_os = "unknown")] +impl MetricsService { + fn inner_new(metrics: Option) -> Self { + Self { + metrics, + pid: None, + } + } + + fn process_info(&mut self) -> ProcessInfo { + ProcessInfo::default() + } +} + + +impl MetricsService { + pub fn with_prometheus(registry: &Registry, name: &str, version: &str, roles: u64) + -> Result + { + PrometheusMetrics::setup(registry, name, version, roles).map(|p| { + Self::inner_new(Some(p)) + }) + } + + pub fn new() -> Self { + Self::inner_new(None) + } + + #[cfg(not(target_os = "unknown"))] + fn process_info_for(&mut self, pid: &sysinfo::Pid) -> ProcessInfo { + let mut info = ProcessInfo::default(); + if self.system.refresh_process(*pid) { + let prc = self.system.get_process(*pid) + .expect("Above refresh_process succeeds, this must be Some(), qed"); + info.cpu_usage = prc.cpu_usage().into(); + info.memory = prc.memory(); + } + info + } + + #[cfg(not(target_os = "unknown"))] + fn connections_info(&self) -> Option { + self.pid.as_ref().and_then(|pid| { + let af_flags = AddressFamilyFlags::IPV4 | AddressFamilyFlags::IPV6; + let proto_flags = ProtocolFlags::TCP; + let netstat_pid = *pid as u32; + + iterate_sockets_info(af_flags, proto_flags).ok().map(|iter| + iter.filter_map(|r| + r.ok().and_then(|s| { + match s.protocol_socket_info { + ProtocolSocketInfo::Tcp(info) + if s.associated_pids.contains(&netstat_pid) => Some(info.state), + _ => None + } + }) + ).fold(ConnectionsCount::default(), |mut counter, socket_state| { + match socket_state { + TcpState::Listen => counter.listen += 1, + TcpState::Established => 
counter.established += 1, + TcpState::Closed => counter.closed += 1, + TcpState::SynSent | TcpState::SynReceived => counter.starting += 1, + TcpState::FinWait1 | TcpState::FinWait2 | TcpState::CloseWait + | TcpState::Closing | TcpState::LastAck => counter.closing += 1, + _ => counter.other += 1 + } + + counter + }) + ) + }) + } + + pub fn tick( + &mut self, + info: &ClientInfo, + txpool_status: &PoolStatus, + net_status: &NetworkStatus, + ) { + + let best_number = info.chain.best_number.saturated_into::(); + let best_hash = info.chain.best_hash; + let num_peers = net_status.num_connected_peers; + let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); + let bandwidth_download = net_status.average_download_per_sec; + let bandwidth_upload = net_status.average_upload_per_sec; + let best_seen_block = net_status.best_seen_block + .map(|num: NumberFor| num.unique_saturated_into() as u64); + let process_info = self.process_info(); + + telemetry!( + SUBSTRATE_INFO; + "system.interval"; + "peers" => num_peers, + "height" => best_number, + "best" => ?best_hash, + "txcount" => txpool_status.ready, + "cpu" => process_info.cpu_usage, + "memory" => process_info.memory, + "finalized_height" => finalized_number, + "finalized_hash" => ?info.chain.finalized_hash, + "bandwidth_download" => bandwidth_download, + "bandwidth_upload" => bandwidth_upload, + "used_state_cache_size" => info.usage.as_ref() + .map(|usage| usage.memory.state_cache.as_bytes()) + .unwrap_or(0), + "used_db_cache_size" => info.usage.as_ref() + .map(|usage| usage.memory.database_cache.as_bytes()) + .unwrap_or(0), + "disk_read_per_sec" => info.usage.as_ref() + .map(|usage| usage.io.bytes_read) + .unwrap_or(0), + "disk_write_per_sec" => info.usage.as_ref() + .map(|usage| usage.io.bytes_written) + .unwrap_or(0), + ); + + if let Some(metrics) = self.metrics.as_ref() { + metrics.cpu_usage_percentage.set(process_info.cpu_usage as f64); + // `sysinfo::Process::memory` returns memory usage in KiB and not bytes. 
+ metrics.memory_usage_bytes.set(process_info.memory * 1024); + + if let Some(threads) = process_info.threads { + metrics.threads.set(threads); + } + + if let Some(fd_info) = process_info.open_fd { + metrics.open_files.with_label_values(&["paths"]).set(fd_info.paths); + metrics.open_files.with_label_values(&["mem"]).set(fd_info.mem); + metrics.open_files.with_label_values(&["sockets"]).set(fd_info.sockets); + metrics.open_files.with_label_values(&["net"]).set(fd_info.net); + metrics.open_files.with_label_values(&["pipe"]).set(fd_info.pipes); + metrics.open_files.with_label_values(&["anon_inode"]).set(fd_info.anon_inode); + metrics.open_files.with_label_values(&["other"]).set(fd_info.other); + } + + + metrics.network_per_sec_bytes.with_label_values(&["download"]).set( + net_status.average_download_per_sec, + ); + metrics.network_per_sec_bytes.with_label_values(&["upload"]).set( + net_status.average_upload_per_sec, + ); + + metrics.block_height.with_label_values(&["finalized"]).set(finalized_number); + metrics.block_height.with_label_values(&["best"]).set(best_number); + if let Ok(leaves) = u64::try_from(info.chain.number_leaves) { + metrics.number_leaves.set(leaves); + } + + metrics.ready_transactions_number.set(txpool_status.ready as u64); + + if let Some(best_seen_block) = best_seen_block { + metrics.block_height.with_label_values(&["sync_target"]).set(best_seen_block); + } + + if let Some(info) = info.usage.as_ref() { + metrics.database_cache.set(info.memory.database_cache.as_bytes() as u64); + metrics.state_cache.set(info.memory.state_cache.as_bytes() as u64); + + metrics.state_db.with_label_values(&["non_canonical"]).set( + info.memory.state_db.non_canonical.as_bytes() as u64, + ); + if let Some(pruning) = info.memory.state_db.pruning { + metrics.state_db.with_label_values(&["pruning"]).set(pruning.as_bytes() as u64); + } + metrics.state_db.with_label_values(&["pinned"]).set( + info.memory.state_db.pinned.as_bytes() as u64, + ); + } + + #[cfg(not(target_os = "unknown"))] + { + let load = self.system.get_load_average(); + metrics.load_avg.with_label_values(&["1min"]).set(load.one); + metrics.load_avg.with_label_values(&["5min"]).set(load.five); + metrics.load_avg.with_label_values(&["15min"]).set(load.fifteen); + + if let Some(conns) = self.connections_info() { + metrics.netstat.with_label_values(&["listen"]).set(conns.listen); + metrics.netstat.with_label_values(&["established"]).set(conns.established); + metrics.netstat.with_label_values(&["starting"]).set(conns.starting); + metrics.netstat.with_label_values(&["closing"]).set(conns.closing); + metrics.netstat.with_label_values(&["closed"]).set(conns.closed); + metrics.netstat.with_label_values(&["other"]).set(conns.other); + } + } + } + } +} diff --git a/client/service/src/status_sinks.rs b/client/service/src/status_sinks.rs index 8e189be157be5234882d234fc64ed9709a3918a7..4b1dce52f9a319ee02c2033f0214fffd6fe56a86 100644 --- a/client/service/src/status_sinks.rs +++ b/client/service/src/status_sinks.rs @@ -14,11 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use futures::{Stream, stream::futures_unordered::FuturesUnordered, channel::mpsc}; +use futures::{Stream, stream::futures_unordered::FuturesUnordered}; use std::time::Duration; use std::pin::Pin; use std::task::{Poll, Context}; use futures_timer::Delay; +use sp_utils::mpsc::TracingUnboundedSender; /// Holds a list of `UnboundedSender`s, each associated with a certain time period. 
Every time the /// period elapses, we push an element on the sender. @@ -31,7 +32,7 @@ pub struct StatusSinks { struct YieldAfter { delay: Delay, interval: Duration, - sender: Option>, + sender: Option>, } impl StatusSinks { @@ -45,7 +46,7 @@ impl StatusSinks { /// Adds a sender to the collection. /// /// The `interval` is the time period between two pushes on the sender. - pub fn push(&mut self, interval: Duration, sender: mpsc::UnboundedSender) { + pub fn push(&mut self, interval: Duration, sender: TracingUnboundedSender) { self.entries.push(YieldAfter { delay: Delay::new(interval), interval, @@ -88,7 +89,7 @@ impl StatusSinks { } impl futures::Future for YieldAfter { - type Output = (mpsc::UnboundedSender, Duration); + type Output = (TracingUnboundedSender, Duration); fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = Pin::into_inner(self); diff --git a/client/service/src/task_manager.rs b/client/service/src/task_manager.rs index d7041e44b9c1e17eaa2d82e0e8a80467ddcffcb3..7c5862e8535c4bcacdb846f9f228c630ccfc0c3d 100644 --- a/client/service/src/task_manager.rs +++ b/client/service/src/task_manager.rs @@ -22,16 +22,18 @@ use exit_future::Signal; use log::{debug, error}; use futures::{ Future, FutureExt, Stream, - future::select, channel::mpsc, + future::select, compat::*, task::{Spawn, FutureObj, SpawnError}, }; +use sc_client_api::CloneableSpawn; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; /// Type alias for service task executor (usually runtime). pub type ServiceTaskExecutor = Arc + Send>>) + Send + Sync>; /// Type alias for the task scheduler. -pub type TaskScheduler = mpsc::UnboundedSender<(Pin + Send>>, Cow<'static, str>)>; +pub type TaskScheduler = TracingUnboundedSender<(Pin + Send>>, Cow<'static, str>)>; /// Helper struct to setup background tasks execution for service. pub struct TaskManagerBuilder { @@ -43,14 +45,14 @@ pub struct TaskManagerBuilder { /// Sender for futures that must be spawned as background tasks. to_spawn_tx: TaskScheduler, /// Receiver for futures that must be spawned as background tasks. - to_spawn_rx: mpsc::UnboundedReceiver<(Pin + Send>>, Cow<'static, str>)>, + to_spawn_rx: TracingUnboundedReceiver<(Pin + Send>>, Cow<'static, str>)>, } impl TaskManagerBuilder { /// New asynchronous task manager setup. pub fn new() -> Self { let (signal, on_exit) = exit_future::signal(); - let (to_spawn_tx, to_spawn_rx) = mpsc::unbounded(); + let (to_spawn_tx, to_spawn_rx) = tracing_unbounded("mpsc_task_manager"); Self { on_exit, signal: Some(signal), @@ -118,6 +120,12 @@ impl Spawn for SpawnTaskHandle { } } +impl sc_client_api::CloneableSpawn for SpawnTaskHandle { + fn clone(&self) -> Box { + Box::new(Clone::clone(self)) + } +} + type Boxed01Future01 = Box + Send + 'static>; impl futures01::future::Executor for SpawnTaskHandle { @@ -137,7 +145,7 @@ pub struct TaskManager { /// Sender for futures that must be spawned as background tasks. to_spawn_tx: TaskScheduler, /// Receiver for futures that must be spawned as background tasks. - to_spawn_rx: mpsc::UnboundedReceiver<(Pin + Send>>, Cow<'static, str>)>, + to_spawn_rx: TracingUnboundedReceiver<(Pin + Send>>, Cow<'static, str>)>, /// How to spawn background tasks. 
executor: ServiceTaskExecutor, } diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index f27f8803300b30d86f6ad605541bed438788ffb7..39c17420bf571fc7688b728999f3e302bf6a7f23 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -14,12 +14,15 @@ tokio = "0.1.22" futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" env_logger = "0.7.0" -fdlimit = "0.1.1" +fdlimit = "0.1.4" futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../../service" } -sc-network = { version = "0.8.0-alpha.2", path = "../../network" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } -sc-client = { version = "0.8.0-alpha.2", path = "../../" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } +sc-service = { version = "0.8.0-alpha.5", default-features = false, path = "../../service" } +sc-network = { version = "0.8.0-alpha.5", path = "../../network" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } +sc-client = { version = "0.8.0-alpha.5", path = "../../" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../primitives/transaction-pool" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 259c25ee4d436cc0b953336d0278cdad2239cbe5..2811076ba38b768ca9bcb32ccd8647bc6f7e9ffd 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -34,11 +34,11 @@ use sc_service::{ Configuration, config::{DatabaseConfig, KeystoreConfig}, RuntimeGenesis, - Roles, + Role, Error, }; -use sc_network::{multiaddr, Multiaddr, NetworkStateInfo}; -use sc_network::config::{NetworkConfiguration, TransportConfig, NodeKeyConfig, Secret, NonReservedPeerMode}; +use sc_network::{multiaddr, Multiaddr}; +use sc_network::config::{NetworkConfiguration, TransportConfig}; use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_transaction_pool::TransactionPool; @@ -131,10 +131,10 @@ where F: Send + 'static, L: Send +'static, U: Clone + Send + 'static } } -fn node_config ( +fn node_config ( index: usize, spec: &GenericChainSpec, - role: Roles, + role: Role, task_executor: Arc + Send>>) + Send + Sync>, key_seed: Option, base_port: u16, @@ -143,58 +143,46 @@ fn node_config TestNet where F: AbstractService, L: AbstractService, - E: ChainSpecExtension + Clone + 'static, + E: ChainSpecExtension + Clone + 'static + Send, G: RuntimeGenesis + 'static, { fn new( @@ -266,7 +254,7 @@ impl TestNet where let node_config = node_config( self.nodes, &self.chain_spec, - Roles::AUTHORITY, + Role::Authority { sentry_nodes: Vec::new() }, task_executor, Some(key), self.base_port, @@ -277,7 +265,7 @@ impl TestNet where let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); + let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().clone().into())); 
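Stepping back from the test harness for a moment: the task_manager.rs hunk above shows the pattern applied throughout this diff, where every anonymous futures channel becomes a tracing_unbounded("mpsc_...") call from the new sp-utils dependency. The sketch below is not the real sp-utils implementation, only an illustration of the idea of tagging an unbounded channel with a static name so its creation (and, in the real code, its usage) can be traced; plain futures types stand in for TracingUnboundedSender/Receiver:

// Not the real sp-utils code: an unbounded channel tagged with a static name.
use futures::channel::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};
use futures::StreamExt;

fn tracing_unbounded<T>(name: &'static str) -> (UnboundedSender<T>, UnboundedReceiver<T>) {
    log::trace!("created unbounded channel: {}", name);
    unbounded()
}

fn main() {
    let (tx, mut rx) = tracing_unbounded::<u32>("mpsc_example");
    tx.unbounded_send(7).expect("receiver is alive");
    let received = futures::executor::block_on(rx.next());
    assert_eq!(received, Some(7));
}
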
self.authority_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -287,13 +275,13 @@ impl TestNet where let executor = executor.clone(); Arc::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) }; - let node_config = node_config(self.nodes, &self.chain_spec, Roles::FULL, task_executor, None, self.base_port, &temp); + let node_config = node_config(self.nodes, &self.chain_spec, Role::Full, task_executor, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let (service, user_data) = full(node_config).expect("Error creating test node service"); let service = SyncService::from(service); executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); + let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().clone().into())); self.full_nodes.push((self.nodes, service, user_data, addr)); self.nodes += 1; } @@ -303,12 +291,12 @@ impl TestNet where let executor = executor.clone(); Arc::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) }; - let node_config = node_config(self.nodes, &self.chain_spec, Roles::LIGHT, task_executor, None, self.base_port, &temp); + let node_config = node_config(self.nodes, &self.chain_spec, Role::Light, task_executor, None, self.base_port, &temp); let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); let service = SyncService::from(light(node_config).expect("Error creating test node service")); executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().into())); + let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().clone().into())); self.light_nodes.push((self.nodes, service, addr)); self.nodes += 1; } @@ -324,7 +312,7 @@ pub fn connectivity( full_builder: Fb, light_builder: Lb, ) where - E: ChainSpecExtension + Clone + 'static, + E: ChainSpecExtension + Clone + 'static + Send, G: RuntimeGenesis + 'static, Fb: Fn(Configuration) -> Result, F: AbstractService, @@ -432,7 +420,7 @@ pub fn sync( B: FnMut(&F, &mut U), ExF: FnMut(&F, &U) -> ::Extrinsic, U: Clone + Send + 'static, - E: ChainSpecExtension + Clone + 'static, + E: ChainSpecExtension + Clone + 'static + Send, G: RuntimeGenesis + 'static, { const NUM_FULL_NODES: usize = 10; @@ -482,7 +470,12 @@ pub fn sync( let first_user_data = &network.full_nodes[0].2; let best_block = BlockId::number(first_service.get().client().chain_info().best_number); let extrinsic = extrinsic_factory(&first_service.get(), first_user_data); - futures::executor::block_on(first_service.get().transaction_pool().submit_one(&best_block, extrinsic)).unwrap(); + let source = sp_transaction_pool::TransactionSource::External; + + futures::executor::block_on( + first_service.get().transaction_pool().submit_one(&best_block, source, extrinsic) + ).expect("failed to submit extrinsic"); + network.run_until_all_full( |_index, service| service.get().transaction_pool().ready().count() == 1, |_index, _service| true, @@ -499,7 +492,7 @@ pub fn consensus( F: AbstractService, Lb: Fn(Configuration) -> Result, L: AbstractService, - E: ChainSpecExtension + Clone + 'static, + E: ChainSpecExtension + Clone + 'static + Send, G: RuntimeGenesis + 'static, { const NUM_FULL_NODES: usize = 10; diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index 
3fb6123640c5a4ee83151b275d26263a99efd175..1160449eeef88c251673251d873c76855d7bcab3 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -27,13 +27,14 @@ use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; use sp_externalities::Extensions; use sp_core::{NativeOrEncoded, NeverNativeValue, traits::CodeExecutor}; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; -use sc_client_api::{backend, call_executor::CallExecutor}; +use sc_client_api::{backend, call_executor::CallExecutor, CloneableSpawn}; /// Call executor that executes methods locally, querying all required /// data from local backend. pub struct LocalCallExecutor { backend: Arc, executor: E, + spawn_handle: Box, } impl LocalCallExecutor { @@ -41,10 +42,12 @@ impl LocalCallExecutor { pub fn new( backend: Arc, executor: E, + spawn_handle: Box, ) -> Self { LocalCallExecutor { backend, executor, + spawn_handle, } } } @@ -54,6 +57,7 @@ impl Clone for LocalCallExecutor where E: Clone { LocalCallExecutor { backend: self.backend.clone(), executor: self.executor.clone(), + spawn_handle: self.spawn_handle.clone(), } } } @@ -91,6 +95,7 @@ where call_data, extensions.unwrap_or_default(), &state_runtime_code.runtime_code()?, + self.spawn_handle.clone(), ).execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( strategy.get_manager(), None, @@ -155,34 +160,38 @@ where recorder.clone(), ); - StateMachine::new( + let changes = &mut *changes.borrow_mut(); + let mut state_machine = StateMachine::new( &backend, changes_trie_state, - &mut *changes.borrow_mut(), + changes, &self.executor, method, call_data, extensions.unwrap_or_default(), &runtime_code, - ) + self.spawn_handle.clone(), + ); // TODO: https://github.com/paritytech/substrate/issues/4455 // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) - .execute_using_consensus_failure_handler(execution_manager, native_call) + state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) }, None => { let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - StateMachine::new( + let runtime_code = state_runtime_code.runtime_code()?; + let changes = &mut *changes.borrow_mut(); + let mut state_machine = StateMachine::new( &state, changes_trie_state, - &mut *changes.borrow_mut(), + changes, &self.executor, method, call_data, extensions.unwrap_or_default(), - &state_runtime_code.runtime_code()?, - ) - .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) - .execute_using_consensus_failure_handler(execution_manager, native_call) + &runtime_code, + self.spawn_handle.clone(), + ).with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)); + state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) } }.map_err(Into::into) } @@ -218,6 +227,7 @@ where trie_state, overlay, &self.executor, + self.spawn_handle.clone(), method, call_data, &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, diff --git a/client/src/client.rs b/client/src/client.rs index f6feb0e858e7c51a40856ece7067ad5fed57ce2f..4711ad8b3f1a0e5ac6cdda4628c77abc6e1c9fdc 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -21,7 +21,6 @@ use std::{ result, }; use log::{info, trace, warn}; -use futures::channel::mpsc; use parking_lot::{Mutex, RwLock}; use codec::{Encode, Decode}; use hash_db::Prefix; @@ -76,8 +75,9 @@ pub use sc_client_api::{ }, 
execution_extensions::{ExecutionExtensions, ExecutionStrategies}, notifications::{StorageNotifications, StorageEventStream}, - CallExecutor, ExecutorProvider, ProofProvider, + CallExecutor, ExecutorProvider, ProofProvider, CloneableSpawn, }; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_blockchain::Error; use prometheus_endpoint::Registry; @@ -93,8 +93,8 @@ pub struct Client where Block: BlockT { backend: Arc, executor: E, storage_notifications: Mutex>, - import_notification_sinks: Mutex>>>, - finality_notification_sinks: Mutex>>>, + import_notification_sinks: Mutex>>>, + finality_notification_sinks: Mutex>>>, // holds the block hash currently being imported. TODO: replace this with block queue importing_block: RwLock>, block_rules: BlockRules, @@ -135,6 +135,7 @@ pub fn new_in_mem( genesis_storage: &S, keystore: Option, prometheus_registry: Option, + spawn_handle: Box, ) -> sp_blockchain::Result, LocalCallExecutor, E>, @@ -145,7 +146,7 @@ pub fn new_in_mem( S: BuildStorage, Block: BlockT, { - new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage, keystore, prometheus_registry) + new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage, keystore, spawn_handle, prometheus_registry) } /// Create a client with the explicitly provided backend. @@ -155,6 +156,7 @@ pub fn new_with_backend( executor: E, build_genesis_storage: &S, keystore: Option, + spawn_handle: Box, prometheus_registry: Option, ) -> sp_blockchain::Result, Block, RA>> where @@ -163,7 +165,7 @@ pub fn new_with_backend( Block: BlockT, B: backend::LocalBackend + 'static, { - let call_executor = LocalCallExecutor::new(backend.clone(), executor); + let call_executor = LocalCallExecutor::new(backend.clone(), executor, spawn_handle); let extensions = ExecutionExtensions::new(Default::default(), keystore); Client::new( backend, @@ -258,7 +260,7 @@ impl Client where backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; let state_root = op.reset_storage(genesis_storage)?; let genesis_block = genesis::construct_genesis_block::(state_root.into()); - info!("Initializing Genesis block/state (state: {}, header-hash: {})", + info!("🔨 Initializing Genesis block/state (state: {}, header-hash: {})", genesis_block.header().state_root(), genesis_block.header().hash() ); @@ -1124,7 +1126,13 @@ impl ProofProvider for Client where let state = self.state_at(id)?; let header = self.prepare_environment_block(id)?; - prove_execution(state, header, &self.executor, method, call_data).map(|(r, p)| { + prove_execution( + state, + header, + &self.executor, + method, + call_data, + ).map(|(r, p)| { (r, StorageProof::merge(vec![p, code_proof])) }) } @@ -1756,13 +1764,13 @@ where { /// Get block import event stream. 
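The `call_executor.rs` and `client.rs` hunks above thread a cloneable spawn handle from the client constructors into `LocalCallExecutor` and on into each `StateMachine::new` call, so background work runs on one injected executor (`sp_core::tasks::executor()` in the doc example). A rough, self-contained sketch of that injection pattern; `CloneableSpawn`, `NoopSpawner`, and the task name below are illustrative stand-ins, not the real trait or API:

// Sketch of passing a cloneable spawn handle down a constructor chain.
trait CloneableSpawn: Send + Sync {
    fn spawn(&self, name: &'static str);
    fn boxed_clone(&self) -> Box<dyn CloneableSpawn>;
}

#[derive(Clone)]
struct NoopSpawner;

impl CloneableSpawn for NoopSpawner {
    fn spawn(&self, name: &'static str) {
        println!("would spawn background task: {}", name);
    }
    fn boxed_clone(&self) -> Box<dyn CloneableSpawn> {
        Box::new(self.clone())
    }
}

struct LocalCallExecutor {
    spawn_handle: Box<dyn CloneableSpawn>,
}

struct Client {
    executor: LocalCallExecutor,
}

impl Client {
    // Mirrors `new_with_backend`: the handle is injected once and reused,
    // instead of each layer creating its own task executor.
    fn new(spawn_handle: Box<dyn CloneableSpawn>) -> Self {
        Client { executor: LocalCallExecutor { spawn_handle } }
    }
}

fn main() {
    let client = Client::new(Box::new(NoopSpawner));
    client.executor.spawn_handle.spawn("wasm-runtime-worker"); // hypothetical task name
    let _second: Box<dyn CloneableSpawn> = client.executor.spawn_handle.boxed_clone();
}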
fn import_notification_stream(&self) -> ImportNotifications { - let (sink, stream) = mpsc::unbounded(); + let (sink, stream) = tracing_unbounded("mpsc_import_notification_stream"); self.import_notification_sinks.lock().push(sink); stream } fn finality_notification_stream(&self) -> FinalityNotifications { - let (sink, stream) = mpsc::unbounded(); + let (sink, stream) = tracing_unbounded("mpsc_finality_notification_stream"); self.finality_notification_sinks.lock().push(sink); stream } @@ -3016,7 +3024,7 @@ pub(crate) mod tests { pruning: PruningMode::ArchiveAll, source: DatabaseSettingsSrc::Path { path: tmp.path().into(), - cache_size: None, + cache_size: 128, } }, u64::max_value(), @@ -3218,7 +3226,7 @@ pub(crate) mod tests { pruning: PruningMode::keep_blocks(1), source: DatabaseSettingsSrc::Path { path: tmp.path().into(), - cache_size: None, + cache_size: 128, } }, u64::max_value(), @@ -3482,6 +3490,7 @@ pub(crate) mod tests { &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), None, None, + sp_core::tasks::executor(), ) .unwrap(); diff --git a/client/src/genesis.rs b/client/src/genesis.rs index 37c0a19a9b809efd72d211b4c041bb3bbf6e571d..2c84ff1e4331af066d43e314d31059740adcce6e 100644 --- a/client/src/genesis.rs +++ b/client/src/genesis.rs @@ -54,6 +54,7 @@ mod tests { AccountKeyring, Sr25519Keyring, }; use sp_runtime::traits::BlakeTwo256; + use sp_core::tasks::executor as tasks_executor; use hex_literal::*; native_executor_instance!( @@ -101,6 +102,7 @@ mod tests { &header.encode(), Default::default(), &runtime_code, + tasks_executor(), ).execute( ExecutionStrategy::NativeElseWasm, ).unwrap(); @@ -115,6 +117,7 @@ mod tests { &tx.encode(), Default::default(), &runtime_code, + tasks_executor(), ).execute( ExecutionStrategy::NativeElseWasm, ).unwrap(); @@ -129,6 +132,7 @@ mod tests { &[], Default::default(), &runtime_code, + tasks_executor(), ).execute( ExecutionStrategy::NativeElseWasm, ).unwrap(); @@ -179,6 +183,7 @@ mod tests { &b1data, Default::default(), &runtime_code, + tasks_executor(), ).execute( ExecutionStrategy::NativeElseWasm, ).unwrap(); @@ -210,6 +215,7 @@ mod tests { &b1data, Default::default(), &runtime_code, + tasks_executor(), ).execute( ExecutionStrategy::AlwaysWasm, ).unwrap(); @@ -241,6 +247,7 @@ mod tests { &b1data, Default::default(), &runtime_code, + tasks_executor(), ).execute( ExecutionStrategy::NativeElseWasm, ); diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index bdbfdbc7ec8f03cba799075dd84274860b018ab3..3672da1822df1b30f377f64a3bf1d8c8d85dd8d7 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -304,6 +304,7 @@ impl HeaderBackend for Blockchain { genesis_hash: storage.genesis_hash, finalized_hash: storage.finalized_hash, finalized_number: storage.finalized_number, + number_leaves: storage.leaves.count() } } diff --git a/client/src/leaves.rs b/client/src/leaves.rs index 1082e6ca071eac8463f0c95163e4e8a86b1a6cb3..7c169488ddcf4ef2031fd4d45a1e121afb3f622e 100644 --- a/client/src/leaves.rs +++ b/client/src/leaves.rs @@ -195,6 +195,11 @@ impl LeafSet where self.storage.iter().flat_map(|(_, hashes)| hashes.iter()).cloned().collect() } + /// Number of known leaves + pub fn count(&self) -> usize { + self.storage.len() + } + /// Write the leaf list to the database transaction. 
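The notification-sink hunks above replace anonymous `futures::channel::mpsc::unbounded()` channels with `tracing_unbounded("mpsc_import_notification_stream")` and friends, so every unbounded channel carries a name that can surface in diagnostics. A dependency-free sketch of that idea, using `std::sync::mpsc` as a stand-in for the real `sp_utils::mpsc` helper:

// Illustrative stand-in for `tracing_unbounded`: same shape, but built on the
// std channel so the sketch compiles on its own. The `name` is what lets
// channel traffic be attributed, which is the point of the change above.
use std::sync::mpsc;

fn tracing_unbounded_sketch<T>(name: &'static str) -> (mpsc::Sender<T>, mpsc::Receiver<T>) {
    println!("created unbounded channel: {}", name);
    mpsc::channel()
}

fn main() {
    let (sink, stream) = tracing_unbounded_sketch::<u32>("mpsc_import_notification_stream");
    sink.send(7).expect("receiver still alive");
    assert_eq!(stream.recv().expect("sender still alive"), 7);
}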
pub fn prepare_transaction(&mut self, tx: &mut DBTransaction, column: u32, prefix: &[u8]) { let mut buf = prefix.to_vec(); diff --git a/client/src/lib.rs b/client/src/lib.rs index e5365e2e64d4359d4e5f8a660203956bf4e3285a..20a3ed058aa7cb5fd562e87e03aac6ae078d40ab 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -62,6 +62,7 @@ //! LocalCallExecutor::new( //! backend.clone(), //! NativeExecutor::::new(WasmExecutionMethod::Interpreted, None, 8), +//! sp_core::tasks::executor(), //! ), //! // This parameter provides the storage for the chain genesis. //! &::default(), diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 749e24af046f7678da10f270561d5a6c1aa1aeac..0b334d48b7a251a6173f292e9416e0fa7925d6da 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -505,6 +505,12 @@ impl StateBackend for GenesisOrUnavailableState } } + fn register_overlay_stats(&mut self, _stats: &sp_state_machine::StateMachineStats) { } + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + sp_state_machine::UsageInfo::empty() + } + fn as_trie_backend(&mut self) -> Option<&TrieBackend> { match self { GenesisOrUnavailableState::Genesis(ref mut state) => state.as_trie_backend(), diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index a08b0f4c44cf574b9a2f4ebf067ddfc818912eb1..b439a268d2fe18ef4fd1722e7ec2566d4065bea8 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -28,7 +28,7 @@ use sp_runtime::{ use sp_externalities::Extensions; use sp_state_machine::{ self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, + execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, CloneableSpawn, }; use hash_db::Hasher; @@ -216,6 +216,7 @@ pub fn prove_execution( /// Proof should include both environment preparation proof and method execution proof. pub fn check_execution_proof( executor: &E, + spawn_handle: Box, request: &RemoteCallRequest

, remote_proof: StorageProof, ) -> ClientResult> @@ -227,6 +228,7 @@ pub fn check_execution_proof( { check_execution_proof_with_make_header::( executor, + spawn_handle, request, remote_proof, |header|
::new( @@ -241,6 +243,7 @@ pub fn check_execution_proof( fn check_execution_proof_with_make_header Header>( executor: &E, + spawn_handle: Box, request: &RemoteCallRequest
, remote_proof: StorageProof, make_next_header: MakeNextHeader, @@ -267,6 +270,7 @@ fn check_execution_proof_with_make_header( &local_executor(), + tasks_executor(), &RemoteCallRequest { block: substrate_test_runtime_client::runtime::Hash::default(), header: remote_header, @@ -414,6 +420,7 @@ mod tests { // check remote execution proof locally let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( &local_executor(), + tasks_executor(), &RemoteCallRequest { block: substrate_test_runtime_client::runtime::Hash::default(), header: remote_header, diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index b48e3a3b78dded15bd73cedaed41da79e8f36cbe..0ae0e68e0c8781d0a08e84aceeea40817a1d8873 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -30,7 +30,7 @@ use sp_runtime::traits::{ use sp_state_machine::{ ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - read_child_proof_check, + read_child_proof_check, CloneableSpawn, }; pub use sp_state_machine::StorageProof; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -50,14 +50,15 @@ use crate::light::call_executor::check_execution_proof; pub struct LightDataChecker> { blockchain: Arc>, executor: E, + spawn_handle: Box, _hasher: PhantomData<(B, H)>, } impl> LightDataChecker { /// Create new light data checker. - pub fn new(blockchain: Arc>, executor: E) -> Self { + pub fn new(blockchain: Arc>, executor: E, spawn_handle: Box) -> Self { Self { - blockchain, executor, _hasher: PhantomData + blockchain, executor, spawn_handle, _hasher: PhantomData } } @@ -254,7 +255,12 @@ impl FetchChecker for LightDataChecker request: &RemoteCallRequest, remote_proof: StorageProof, ) -> ClientResult> { - check_execution_proof::<_, _, H>(&self.executor, request, remote_proof) + check_execution_proof::<_, _, H>( + &self.executor, + self.spawn_handle.clone(), + request, + remote_proof, + ) } fn check_changes_proof( @@ -338,7 +344,8 @@ pub mod tests { use sc_client_api::backend::NewBlockState; use substrate_test_runtime_client::{ blockchain::HeaderBackend, AccountKeyring, ClientBlockImportExt, - runtime::{self, Hash, Block, Header, Extrinsic} + runtime::{self, Hash, Block, Header, Extrinsic}, + tasks_executor, }; use sp_consensus::BlockOrigin; @@ -395,7 +402,8 @@ pub mod tests { ).unwrap(); let local_checker = LightDataChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor() + local_executor(), + tasks_executor(), ); (local_checker, remote_block_header, remote_read_proof, heap_pages) } @@ -444,6 +452,7 @@ pub mod tests { let local_checker = LightDataChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), + tasks_executor(), ); (local_checker, remote_block_header, remote_read_proof, child_value) } @@ -474,6 +483,7 @@ pub mod tests { let local_checker = LightDataChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), + tasks_executor(), ); (local_checker, local_cht_root, remote_block_header, remote_header_proof) } @@ -559,6 +569,7 @@ pub mod tests { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), + tasks_executor(), ); let local_checker = &local_checker as &dyn FetchChecker; let max = remote_client.chain_info().best_number; @@ -633,6 +644,7 @@ pub mod tests { let local_checker = TestChecker::new( 
Arc::new(DummyBlockchain::new(local_storage)), local_executor(), + tasks_executor(), ); // check proof on local client @@ -667,6 +679,7 @@ pub mod tests { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), + tasks_executor(), ); let local_checker = &local_checker as &dyn FetchChecker; let max = remote_client.chain_info().best_number; @@ -754,6 +767,7 @@ pub mod tests { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), + tasks_executor(), ); assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, remote_proof.roots_proof.clone()).is_err()); @@ -764,6 +778,7 @@ pub mod tests { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(local_storage)), local_executor(), + tasks_executor(), ); let result = local_checker.check_changes_tries_proof( 4, &remote_proof.roots, StorageProof::empty() @@ -781,6 +796,7 @@ pub mod tests { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), + tasks_executor(), ); let body_request = RemoteBodyRequest { @@ -804,6 +820,7 @@ pub mod tests { let local_checker = TestChecker::new( Arc::new(DummyBlockchain::new(DummyStorage::new())), local_executor(), + tasks_executor(), ); let body_request = RemoteBodyRequest { diff --git a/client/src/light/mod.rs b/client/src/light/mod.rs index 865e6e9ac9bfea84a6ef87ac7ddf6ccd12669d7c..2bb6c85376859af3ab0bfb07b8863d86bd544f5c 100644 --- a/client/src/light/mod.rs +++ b/client/src/light/mod.rs @@ -33,7 +33,7 @@ use prometheus_endpoint::Registry; use crate::call_executor::LocalCallExecutor; use crate::client::Client; use sc_client_api::{ - light::Storage as BlockchainStorage, + light::Storage as BlockchainStorage, CloneableSpawn, }; use crate::light::backend::Backend; use crate::light::blockchain::Blockchain; @@ -59,6 +59,7 @@ pub fn new_light( backend: Arc>>, genesis_storage: &dyn BuildStorage, code_executor: E, + spawn_handle: Box, prometheus_registry: Option, ) -> ClientResult< Client< @@ -76,7 +77,7 @@ pub fn new_light( S: BlockchainStorage + 'static, E: CodeExecutor + RuntimeInfo + Clone + 'static, { - let local_executor = LocalCallExecutor::new(backend.clone(), code_executor); + let local_executor = LocalCallExecutor::new(backend.clone(), code_executor, spawn_handle.clone()); let executor = GenesisCallExecutor::new(backend.clone(), local_executor); Client::new( backend, @@ -93,9 +94,10 @@ pub fn new_light( pub fn new_fetch_checker>( blockchain: Arc>, executor: E, + spawn_handle: Box, ) -> LightDataChecker, B, S> where E: CodeExecutor, { - LightDataChecker::new(blockchain, executor) + LightDataChecker::new(blockchain, executor, spawn_handle) } diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index da62696a152d9ddeeef615e96c28d02a275440ea..9da3c4e1274cd6065999b2a0214280b4b44ce87d 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-state-db" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,11 +11,14 @@ description = "State database maintenance. 
Handles canonicalization and pruning [dependencies] parking_lot = "0.10.0" log = "0.4.8" -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } -parity-util-mem = "0.5.2" +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +parity-util-mem = "0.6" parity-util-mem-derive = "0.1.0" [dev-dependencies] env_logger = "0.7.0" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 248c90ec68623f0320815fbf25511d07304dc31d..dcf218e9763fdb9cb39e7a7877881c7915a5d055 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-telemetry" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] description = "Telemetry utils" edition = "2018" @@ -13,10 +13,10 @@ documentation = "https://docs.rs/sc-telemetry" [dependencies] bytes = "0.5" parking_lot = "0.10.0" -futures = "0.3.1" +futures = "0.3.4" futures-timer = "3.0.1" wasm-timer = "0.2.0" -libp2p = { version = "0.16.2", default-features = false, features = ["libp2p-websocket"] } +libp2p = { version = "0.17.0", default-features = false, features = ["websocket", "wasm-ext", "tcp", "dns"] } log = "0.4.8" pin-project = "0.4.6" rand = "0.7.2" @@ -26,3 +26,6 @@ slog-json = { version = "2.3.0", features = ["nested-values"] } slog-scope = "4.1.2" take_mut = "0.2.2" void = "1.0.2" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index f8ca6d5c73d41bdd86dff66a2ebbff8dc7d33dbc..6c90d6bbcca9f3a60fc765e056abd908c0ea0636 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -22,7 +22,7 @@ //! `slog_scope::with_logger` followed with `slog_log!`. //! //! Note that you are supposed to only ever use `telemetry!` and not `slog_scope::with_logger` at -//! the moment. Substate may eventually be reworked to get proper `slog` support, including sending +//! the moment. Substrate may eventually be reworked to get proper `slog` support, including sending //! information to the telemetry. //! //! The [`Telemetry`] struct implements `Stream` and must be polled regularly (or sent to a @@ -41,7 +41,7 @@ //! endpoints: sc_telemetry::TelemetryEndpoints::new(vec![ //! // The `0` is the maximum verbosity level of messages to send to this endpoint. //! ("wss://example.com".into(), 0) -//! ]), +//! ]).expect("Invalid URL or multiaddr provided"), //! // Can be used to pass an external implementation of WebSockets. //! wasm_external_transport: None, //! }); @@ -62,7 +62,7 @@ use futures::{prelude::*, channel::mpsc}; use libp2p::{Multiaddr, wasm_ext}; use log::{error, warn}; use parking_lot::Mutex; -use serde::{Serialize, Deserialize}; +use serde::{Serialize, Deserialize, Deserializer}; use std::{pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration}; use wasm_timer::Instant; @@ -96,12 +96,47 @@ pub struct TelemetryConfig { /// /// The URL string can be either a URL or a multiaddress. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] -pub struct TelemetryEndpoints(Vec<(String, u8)>); +pub struct TelemetryEndpoints( + #[serde(deserialize_with = "url_or_multiaddr_deser")] + Vec<(Multiaddr, u8)> +); + +/// Custom deserializer for TelemetryEndpoints, used to convert urls or multiaddr to multiaddr. +fn url_or_multiaddr_deser<'de, D>(deserializer: D) -> Result, D::Error> + where D: Deserializer<'de> +{ + Vec::<(String, u8)>::deserialize(deserializer)? + .iter() + .map(|e| Ok((url_to_multiaddr(&e.0) + .map_err(serde::de::Error::custom)?, e.1))) + .collect() +} impl TelemetryEndpoints { - pub fn new(endpoints: Vec<(String, u8)>) -> Self { - TelemetryEndpoints(endpoints) + pub fn new(endpoints: Vec<(String, u8)>) -> Result { + let endpoints: Result, libp2p::multiaddr::Error> = endpoints.iter() + .map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))) + .collect(); + endpoints.map(Self) + } +} + +/// Parses a WebSocket URL into a libp2p `Multiaddr`. +fn url_to_multiaddr(url: &str) -> Result { + // First, assume that we have a `Multiaddr`. + let parse_error = match url.parse() { + Ok(ma) => return Ok(ma), + Err(err) => err, + }; + + // If not, try the `ws://path/url` format. + if let Ok(ma) = libp2p::multiaddr::from_url(url) { + return Ok(ma) } + + // If we have no clue about the format of that string, assume that we were expecting a + // `Multiaddr`. + Err(parse_error) } /// Log levels. @@ -149,13 +184,7 @@ struct TelemetryDrain { /// doesn't provide any way of knowing whether a global logger has already been registered. pub fn init_telemetry(config: TelemetryConfig) -> Telemetry { // Build the list of telemetry endpoints. - let mut endpoints = Vec::new(); - for &(ref url, verbosity) in &config.endpoints.0 { - match url_to_multiaddr(url) { - Ok(addr) => endpoints.push((addr, verbosity)), - Err(err) => warn!(target: "telemetry", "Invalid telemetry URL {}: {}", url, err), - } - } + let (endpoints, wasm_external_transport) = (config.endpoints.0, config.wasm_external_transport); let (sender, receiver) = mpsc::channel(16); let guard = { @@ -164,7 +193,7 @@ pub fn init_telemetry(config: TelemetryConfig) -> Telemetry { slog_scope::set_global_logger(root) }; - let worker = match worker::TelemetryWorker::new(endpoints, config.wasm_external_transport) { + let worker = match worker::TelemetryWorker::new(endpoints, wasm_external_transport) { Ok(w) => Some(w), Err(err) => { error!(target: "telemetry", "Failed to initialize telemetry worker: {:?}", err); @@ -271,24 +300,6 @@ impl slog::Drain for TelemetryDrain { } } -/// Parses a WebSocket URL into a libp2p `Multiaddr`. -fn url_to_multiaddr(url: &str) -> Result { - // First, assume that we have a `Multiaddr`. - let parse_error = match url.parse() { - Ok(ma) => return Ok(ma), - Err(err) => err, - }; - - // If not, try the `ws://path/url` format. - if let Ok(ma) = libp2p::multiaddr::from_url(url) { - return Ok(ma) - } - - // If we have no clue about the format of that string, assume that we were expecting a - // `Multiaddr`. - Err(parse_error) -} - /// Translates to `slog_scope::info`, but contains an additional verbosity /// parameter which the log record is tagged with. Additionally the verbosity /// parameter is added to the record as a key-value pair. @@ -300,3 +311,35 @@ macro_rules! 
telemetry { }) } } + +#[cfg(test)] +mod telemetry_endpoints_tests { + use libp2p::Multiaddr; + use super::TelemetryEndpoints; + use super::url_to_multiaddr; + + #[test] + fn valid_endpoints() { + let endp = vec![("wss://telemetry.polkadot.io/submit/".into(), 3), ("/ip4/80.123.90.4/tcp/5432".into(), 4)]; + let telem = TelemetryEndpoints::new(endp.clone()).expect("Telemetry endpoint should be valid"); + let mut res: Vec<(Multiaddr, u8)> = vec![]; + for (a, b) in endp.iter() { + res.push((url_to_multiaddr(a).expect("provided url should be valid"), *b)) + } + assert_eq!(telem.0, res); + } + + #[test] + fn invalid_endpoints() { + let endp = vec![("/ip4/...80.123.90.4/tcp/5432".into(), 3), ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4)]; + let telem = TelemetryEndpoints::new(endp); + assert!(telem.is_err()); + } + + #[test] + fn valid_and_invalid_endpoints() { + let endp = vec![("/ip4/80.123.90.4/tcp/5432".into(), 3), ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4)]; + let telem = TelemetryEndpoints::new(endp); + assert!(telem.is_err()); + } +} diff --git a/client/telemetry/src/worker/node.rs b/client/telemetry/src/worker/node.rs index 58e9f20bd5f7c7eb221ad2373be5a414ba4154a4..454f504d660050995cba1f6c122c311ba35741ee 100644 --- a/client/telemetry/src/worker/node.rs +++ b/client/telemetry/src/worker/node.rs @@ -116,7 +116,7 @@ where TTrans: Clone + Unpin, TTrans::Dial: Unpin, pending.push_back(payload.into()); Ok(()) } else { - warn!(target: "telemetry", "Rejected log entry because queue is full for {:?}", + warn!(target: "telemetry", "⚠️ Rejected log entry because queue is full for {:?}", self.addr); Err(()) } @@ -137,7 +137,7 @@ where TTrans: Clone + Unpin, TTrans::Dial: Unpin, break NodeSocket::Connected(conn) }, Poll::Ready(Err(err)) => { - warn!(target: "telemetry", "Disconnected from {}: {:?}", self.addr, err); + warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); let timeout = gen_rand_reconnect_delay(); self.socket = NodeSocket::WaitingReconnect(timeout); return Poll::Ready(NodeEvent::Disconnected(err)) @@ -146,7 +146,7 @@ where TTrans: Clone + Unpin, TTrans::Dial: Unpin, } NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { Poll::Ready(Ok(sink)) => { - debug!(target: "telemetry", "Connected to {}", self.addr); + debug!(target: "telemetry", "✅ Connected to {}", self.addr); let conn = NodeSocketConnected { sink, pending: VecDeque::new(), @@ -158,7 +158,7 @@ where TTrans: Clone + Unpin, TTrans::Dial: Unpin, }, Poll::Pending => break NodeSocket::Dialing(s), Poll::Ready(Err(err)) => { - warn!(target: "telemetry", "Error while dialing {}: {:?}", self.addr, err); + warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); let timeout = gen_rand_reconnect_delay(); socket = NodeSocket::WaitingReconnect(timeout); } @@ -169,7 +169,7 @@ where TTrans: Clone + Unpin, TTrans::Dial: Unpin, socket = NodeSocket::Dialing(d); } Err(err) => { - warn!(target: "telemetry", "Error while dialing {}: {:?}", self.addr, err); + warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); let timeout = gen_rand_reconnect_delay(); socket = NodeSocket::WaitingReconnect(timeout); } @@ -181,7 +181,7 @@ where TTrans: Clone + Unpin, TTrans::Dial: Unpin, break NodeSocket::WaitingReconnect(s) } NodeSocket::Poisoned => { - error!(target: "telemetry", "Poisoned connection with {}", self.addr); + error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); break NodeSocket::Poisoned } } diff --git a/client/tracing/Cargo.toml 
b/client/tracing/Cargo.toml index ad988f0607953fb921d443c15315741dd1820054..319526b61044858240908719d233aaef5a134f98 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-tracing" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" @@ -17,7 +17,10 @@ serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } tracing-core = "0.1.7" -sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } +sc-telemetry = { version = "2.0.0-alpha.5", path = "../telemetry" } [dev-dependencies] tracing = "0.1.10" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 233397262a04b9dd67be56a6168eb4785224b914..aeb20e98638d5b2dd5e689dc8cc1e90d23437084 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-transaction-pool" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,24 +9,30 @@ repository = "https://github.com/paritytech/substrate/" description = "Substrate transaction pool implementation." [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0" } +codec = { package = "parity-scale-codec", version = "1.3.0" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } futures-diagnose = "1.0" log = "0.4.8" parking_lot = "0.10.0" wasm-timer = "0.2" -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sc-transaction-graph = { version = "2.0.0-alpha.2", path = "./graph" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -futures-timer = "2.0" -parity-util-mem = { version = "0.5.2", default-features = false, features = ["primitive-types"] } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../primitives/utils" } +sc-transaction-graph = { version = "2.0.0-alpha.5", path = "./graph" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../primitives/transaction-pool" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../api" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +intervalier = "0.4.0" +parity-util-mem = { version = "0.6.0", default-features = false, features = ["primitive-types"] } [dev-dependencies] -sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +assert_matches = "1.3.0" +hex = "0.4" +sp-keyring = { version = "2.0.0-alpha.5", path = "../../primitives/keyring" } substrate-test-runtime-transaction-pool = { version = "2.0.0-dev", path = "../../test-utils/runtime/transaction-pool" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git 
a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index 080061dd256ff145d0f70285d7afad6a50658752..df2fd8546a2650453d273d558bf6b17355509b5c 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sc-transaction-graph" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,24 +10,28 @@ description = "Generic Transaction Pool" [dependencies] derive_more = "0.99.2" -futures = "0.3.1" +futures = "0.3.4" log = "0.4.8" parking_lot = "0.10.0" serde = { version = "1.0.101", features = ["derive"] } wasm-timer = "0.2" -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } -parity-util-mem = { version = "0.5.2", default-features = false, features = ["primitive-types"] } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../../primitives/utils" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../primitives/transaction-pool" } +parity-util-mem = { version = "0.6.0", default-features = false, features = ["primitive-types"] } linked-hash-map = "0.5.2" [dev-dependencies] assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "1.2.0" } +codec = { package = "parity-scale-codec", version = "1.3.0" } substrate-test-runtime = { version = "2.0.0-dev", path = "../../../test-utils/runtime" } criterion = "0.3" [[bench]] name = "basics" harness = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/client/transaction-pool/graph/benches/basics.rs b/client/transaction-pool/graph/benches/basics.rs index 6f5d39f09f5c540a31762cd129e89f48bb4c66d1..23b4dba3488061429a39c04bcd717b55ee007c78 100644 --- a/client/transaction-pool/graph/benches/basics.rs +++ b/client/transaction-pool/graph/benches/basics.rs @@ -18,12 +18,14 @@ use criterion::{criterion_group, criterion_main, Criterion}; use futures::{future::{ready, Ready}, executor::block_on}; use sc_transaction_graph::*; -use sp_runtime::transaction_validity::{ValidTransaction, InvalidTransaction}; use codec::Encode; use substrate_test_runtime::{Block, Extrinsic, Transfer, H256, AccountId}; use sp_runtime::{ generic::BlockId, - transaction_validity::{TransactionValidity, TransactionTag as Tag}, + transaction_validity::{ + ValidTransaction, InvalidTransaction, TransactionValidity, TransactionTag as Tag, + TransactionSource, + }, }; use sp_core::blake2_256; @@ -55,6 +57,7 @@ impl ChainApi for TestApi { fn validate_transaction( &self, at: &BlockId, + _source: TransactionSource, uxt: ExtrinsicFor, ) -> Self::ValidationFuture { let nonce = uxt.transfer().nonce; @@ -121,6 +124,7 @@ fn uxt(transfer: Transfer) -> Extrinsic { } fn bench_configured(pool: Pool, number: u64) { + let source = TransactionSource::External; let mut futures = Vec::new(); let mut tags = Vec::new(); @@ -133,7 +137,7 @@ fn bench_configured(pool: Pool, number: u64) { }); tags.push(to_tag(nonce, 
AccountId::from_h256(H256::from_low_u64_be(1)))); - futures.push(pool.submit_one(&BlockId::Number(1), xt)); + futures.push(pool.submit_one(&BlockId::Number(1), source, xt)); } let res = block_on(futures::future::join_all(futures.into_iter())); diff --git a/client/transaction-pool/graph/src/base_pool.rs b/client/transaction-pool/graph/src/base_pool.rs index 33b7a51f9415b5bc04a288e3a773e817178b4130..38151e9bfd23b87b3fd4511022eaa7f9953ab46a 100644 --- a/client/transaction-pool/graph/src/base_pool.rs +++ b/client/transaction-pool/graph/src/base_pool.rs @@ -33,6 +33,7 @@ use sp_runtime::transaction_validity::{ TransactionTag as Tag, TransactionLongevity as Longevity, TransactionPriority as Priority, + TransactionSource as Source, }; use sp_transaction_pool::{error, PoolStatus, InPoolTransaction}; @@ -102,6 +103,8 @@ pub struct Transaction { pub provides: Vec, /// Should that transaction be propagated. pub propagate: bool, + /// Source of that transaction. + pub source: Source, } impl AsRef for Transaction { @@ -155,6 +158,7 @@ impl Transaction { bytes: self.bytes.clone(), hash: self.hash.clone(), priority: self.priority.clone(), + source: self.source, valid_till: self.valid_till.clone(), requires: self.requires.clone(), provides: self.provides.clone(), @@ -185,6 +189,7 @@ impl fmt::Debug for Transaction where write!(fmt, "valid_till: {:?}, ", &self.valid_till)?; write!(fmt, "bytes: {:?}, ", &self.bytes)?; write!(fmt, "propagate: {:?}, ", &self.propagate)?; + write!(fmt, "source: {:?}, ", &self.source)?; write!(fmt, "requires: [")?; print_tags(fmt, &self.requires)?; write!(fmt, "], provides: [")?; @@ -556,6 +561,7 @@ mod tests { requires: vec![], provides: vec![vec![1]], propagate: true, + source: Source::External, }).unwrap(); // then @@ -578,6 +584,7 @@ mod tests { requires: vec![], provides: vec![vec![1]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![1u8], @@ -588,6 +595,7 @@ mod tests { requires: vec![], provides: vec![vec![1]], propagate: true, + source: Source::External, }).unwrap_err(); // then @@ -611,6 +619,7 @@ mod tests { requires: vec![vec![0]], provides: vec![vec![1]], propagate: true, + source: Source::External, }).unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); @@ -623,6 +632,7 @@ mod tests { requires: vec![], provides: vec![vec![0]], propagate: true, + source: Source::External, }).unwrap(); // then @@ -645,6 +655,7 @@ mod tests { requires: vec![vec![0]], provides: vec![vec![1]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![3u8], @@ -655,6 +666,7 @@ mod tests { requires: vec![vec![2]], provides: vec![], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![2u8], @@ -665,6 +677,7 @@ mod tests { requires: vec![vec![1]], provides: vec![vec![3], vec![2]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![4u8], @@ -675,6 +688,7 @@ mod tests { requires: vec![vec![3], vec![4]], provides: vec![], propagate: true, + source: Source::External, }).unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); @@ -688,6 +702,7 @@ mod tests { requires: vec![], provides: vec![vec![0], vec![4]], propagate: true, + source: Source::External, }).unwrap(); // then @@ -720,6 +735,7 @@ mod tests { requires: vec![vec![0]], provides: vec![vec![1]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![3u8], @@ -730,6 +746,7 @@ mod 
tests { requires: vec![vec![1]], provides: vec![vec![2]], propagate: true, + source: Source::External, }).unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); @@ -744,6 +761,7 @@ mod tests { requires: vec![vec![2]], provides: vec![vec![0]], propagate: true, + source: Source::External, }).unwrap(); // then @@ -764,6 +782,7 @@ mod tests { requires: vec![], provides: vec![vec![0]], propagate: true, + source: Source::External, }).unwrap(); let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); assert_eq!(it.next(), Some(4)); @@ -792,6 +811,7 @@ mod tests { requires: vec![vec![0]], provides: vec![vec![1]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![3u8], @@ -802,6 +822,7 @@ mod tests { requires: vec![vec![1]], provides: vec![vec![2]], propagate: true, + source: Source::External, }).unwrap(); assert_eq!(pool.ready().count(), 0); assert_eq!(pool.ready.len(), 0); @@ -816,6 +837,7 @@ mod tests { requires: vec![vec![2]], provides: vec![vec![0]], propagate: true, + source: Source::External, }).unwrap(); // then @@ -836,6 +858,7 @@ mod tests { requires: vec![], provides: vec![vec![0]], propagate: true, + source: Source::External, }).unwrap_err(); let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); assert_eq!(it.next(), None); @@ -859,6 +882,7 @@ mod tests { requires: vec![], provides: vec![vec![0], vec![4]], propagate: true, + source: Source::External, }).expect("import 1 should be ok"); pool.import(Transaction { data: vec![3u8; 1024], @@ -869,6 +893,7 @@ mod tests { requires: vec![], provides: vec![vec![2], vec![7]], propagate: true, + source: Source::External, }).expect("import 2 should be ok"); assert!(parity_util_mem::malloc_size(&pool) > 5000); @@ -887,6 +912,7 @@ mod tests { requires: vec![], provides: vec![vec![0], vec![4]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![1u8], @@ -897,6 +923,7 @@ mod tests { requires: vec![vec![0]], provides: vec![vec![1]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![3u8], @@ -907,6 +934,7 @@ mod tests { requires: vec![vec![2]], provides: vec![], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![2u8], @@ -917,6 +945,7 @@ mod tests { requires: vec![vec![1]], provides: vec![vec![3], vec![2]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![4u8], @@ -927,6 +956,7 @@ mod tests { requires: vec![vec![3], vec![4]], provides: vec![], propagate: true, + source: Source::External, }).unwrap(); // future pool.import(Transaction { @@ -938,6 +968,7 @@ mod tests { requires: vec![vec![11]], provides: vec![], propagate: true, + source: Source::External, }).unwrap(); assert_eq!(pool.ready().count(), 5); assert_eq!(pool.future.len(), 1); @@ -964,6 +995,7 @@ mod tests { requires: vec![vec![0]], provides: vec![vec![100]], propagate: true, + source: Source::External, }).unwrap(); // ready pool.import(Transaction { @@ -975,6 +1007,7 @@ mod tests { requires: vec![], provides: vec![vec![1]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![2u8], @@ -985,6 +1018,7 @@ mod tests { requires: vec![vec![2]], provides: vec![vec![3]], propagate: true, + source: Source::External, }).unwrap(); pool.import(Transaction { data: vec![3u8], @@ -995,6 +1029,7 @@ mod tests { requires: vec![vec![1]], provides: vec![vec![2]], propagate: true, + source: Source::External, 
}).unwrap(); pool.import(Transaction { data: vec![4u8], @@ -1005,6 +1040,7 @@ mod tests { requires: vec![vec![3], vec![2]], provides: vec![vec![4]], propagate: true, + source: Source::External, }).unwrap(); assert_eq!(pool.ready().count(), 4); @@ -1040,10 +1076,11 @@ mod tests { requires: vec![vec![3], vec![2]], provides: vec![vec![4]], propagate: true, + source: Source::External, }), "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -requires: [03,02], provides: [04], data: [4]}".to_owned() +source: TransactionSource::External, requires: [03,02], provides: [04], data: [4]}".to_owned() ); } @@ -1058,6 +1095,7 @@ requires: [03,02], provides: [04], data: [4]}".to_owned() requires: vec![vec![3], vec![2]], provides: vec![vec![4]], propagate: true, + source: Source::External, }.is_propagable(), true); assert_eq!(Transaction { @@ -1069,6 +1107,7 @@ requires: [03,02], provides: [04], data: [4]}".to_owned() requires: vec![vec![3], vec![2]], provides: vec![vec![4]], propagate: false, + source: Source::External, }.is_propagable(), false); } @@ -1090,6 +1129,7 @@ requires: [03,02], provides: [04], data: [4]}".to_owned() requires: vec![vec![0]], provides: vec![], propagate: true, + source: Source::External, }); if let Err(error::Error::RejectedFutureTransaction) = err { @@ -1113,6 +1153,7 @@ requires: [03,02], provides: [04], data: [4]}".to_owned() requires: vec![vec![0]], provides: vec![], propagate: true, + source: Source::External, }).unwrap(); // then @@ -1142,6 +1183,7 @@ requires: [03,02], provides: [04], data: [4]}".to_owned() requires: vec![vec![0]], provides: vec![], propagate: true, + source: Source::External, }).unwrap(); flag diff --git a/client/transaction-pool/graph/src/future.rs b/client/transaction-pool/graph/src/future.rs index a84a5fbe689c37aacbba24ed1e6a8c6885961589..76181c837f988586d72ebd3568b4c31a35ac6b8d 100644 --- a/client/transaction-pool/graph/src/future.rs +++ b/client/transaction-pool/graph/src/future.rs @@ -249,6 +249,7 @@ impl FutureTransactions { #[cfg(test)] mod tests { use super::*; + use sp_runtime::transaction_validity::TransactionSource; #[test] fn can_track_heap_size() { @@ -263,6 +264,7 @@ mod tests { requires: vec![vec![1], vec![2]], provides: vec![vec![3], vec![4]], propagate: true, + source: TransactionSource::External, }.into(), missing_tags: vec![vec![1u8], vec![2u8]].into_iter().collect(), imported_at: std::time::Instant::now(), diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/graph/src/pool.rs index f0bf17dcb8dd2ecb2091dc9a799c06e8ac90f879..0b817b155d8ecaef12aee3461e0e475d20b15a86 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/graph/src/pool.rs @@ -24,23 +24,23 @@ use crate::base_pool as base; use crate::watcher::Watcher; use serde::Serialize; -use futures::{ - Future, FutureExt, - channel::mpsc, -}; +use futures::{Future, FutureExt}; use sp_runtime::{ generic::BlockId, traits::{self, SaturatedConversion}, - transaction_validity::{TransactionValidity, TransactionTag as Tag, TransactionValidityError}, + transaction_validity::{ + TransactionValidity, TransactionTag as Tag, TransactionValidityError, TransactionSource, + }, }; use sp_transaction_pool::error; use wasm_timer::Instant; +use sp_utils::mpsc::TracingUnboundedReceiver; use crate::validated_pool::ValidatedPool; pub use crate::validated_pool::ValidatedTransaction; /// Modification notification event stream type; -pub type EventStream = mpsc::UnboundedReceiver; +pub type EventStream = 
TracingUnboundedReceiver; /// Extrinsic hash type for a pool. pub type ExHash = ::Hash; @@ -76,6 +76,7 @@ pub trait ChainApi: Send + Sync { fn validate_transaction( &self, at: &BlockId, + source: TransactionSource, uxt: ExtrinsicFor, ) -> Self::ValidationFuture; @@ -144,12 +145,17 @@ impl Pool { } /// Imports a bunch of unverified extrinsics to the pool - pub async fn submit_at(&self, at: &BlockId, xts: T, force: bool) - -> Result, B::Error>>, B::Error> - where - T: IntoIterator> + pub async fn submit_at( + &self, + at: &BlockId, + source: TransactionSource, + xts: T, + force: bool, + ) -> Result, B::Error>>, B::Error> where + T: IntoIterator>, { let validated_pool = self.validated_pool.clone(); + let xts = xts.into_iter().map(|xt| (source, xt)); self.verify(at, xts, force) .map(move |validated_transactions| validated_transactions .map(|validated_transactions| validated_pool.submit(validated_transactions @@ -162,9 +168,10 @@ impl Pool { pub async fn submit_one( &self, at: &BlockId, + source: TransactionSource, xt: ExtrinsicFor, ) -> Result, B::Error> { - self.submit_at(at, std::iter::once(xt), false) + self.submit_at(at, source, std::iter::once(xt), false) .map(|import_result| import_result.and_then(|mut import_result| import_result .pop() .expect("One extrinsic passed; one result returned; qed") @@ -176,10 +183,13 @@ impl Pool { pub async fn submit_and_watch( &self, at: &BlockId, + source: TransactionSource, xt: ExtrinsicFor, ) -> Result, BlockHash>, B::Error> { let block_number = self.resolve_block_number(at)?; - let (_, tx) = self.verify_one(at, block_number, xt, false).await; + let (_, tx) = self.verify_one( + at, block_number, source, xt, false + ).await; self.validated_pool.submit_and_watch(tx) } @@ -249,7 +259,7 @@ impl Pool { // to get validity info and tags that the extrinsic provides. None => { let validity = self.validated_pool.api() - .validate_transaction(parent, extrinsic.clone()) + .validate_transaction(parent, TransactionSource::InBlock, extrinsic.clone()) .await; if let Ok(Ok(validity)) = validity { @@ -303,8 +313,12 @@ impl Pool { // Try to re-validate pruned transactions since some of them might be still valid. // note that `known_imported_hashes` will be rejected here due to temporary ban. 
- let pruned_hashes = prune_status.pruned.iter().map(|tx| tx.hash.clone()).collect::>(); - let pruned_transactions = prune_status.pruned.into_iter().map(|tx| tx.data.clone()); + let pruned_hashes = prune_status.pruned + .iter() + .map(|tx| tx.hash.clone()).collect::>(); + let pruned_transactions = prune_status.pruned + .into_iter() + .map(|tx| (tx.source, tx.data.clone())); let reverified_transactions = self.verify(at, pruned_transactions, false).await?; @@ -335,7 +349,7 @@ impl Pool { async fn verify( &self, at: &BlockId, - xts: impl IntoIterator>, + xts: impl IntoIterator)>, force: bool, ) -> Result, ValidatedTransactionFor>, B::Error> { // we need a block number to compute tx validity @@ -345,7 +359,7 @@ impl Pool { for (hash, validated_tx) in futures::future::join_all( xts.into_iter() - .map(|xt| self.verify_one(at, block_number, xt, force)) + .map(|(source, xt)| self.verify_one(at, block_number, source, xt, force)) ) .await { @@ -360,6 +374,7 @@ impl Pool { &self, block_id: &BlockId, block_number: NumberFor, + source: TransactionSource, xt: ExtrinsicFor, force: bool, ) -> (ExHash, ValidatedTransactionFor) { @@ -371,7 +386,11 @@ impl Pool { ) } - let validation_result = self.validated_pool.api().validate_transaction(block_id, xt.clone()).await; + let validation_result = self.validated_pool.api().validate_transaction( + block_id, + source, + xt.clone(), + ).await; let status = match validation_result { Ok(status) => status, @@ -386,6 +405,7 @@ impl Pool { ValidatedTransaction::valid_at( block_number.saturated_into::(), hash.clone(), + source, xt, bytes, validity, @@ -422,7 +442,7 @@ mod tests { use futures::executor::block_on; use super::*; use sp_transaction_pool::TransactionStatus; - use sp_runtime::transaction_validity::{ValidTransaction, InvalidTransaction}; + use sp_runtime::transaction_validity::{ValidTransaction, InvalidTransaction, TransactionSource}; use codec::Encode; use substrate_test_runtime::{Block, Extrinsic, Transfer, H256, AccountId}; use assert_matches::assert_matches; @@ -430,6 +450,7 @@ mod tests { use crate::base_pool::Limit; const INVALID_NONCE: u64 = 254; + const SOURCE: TransactionSource = TransactionSource::External; #[derive(Clone, Debug, Default)] struct TestApi { @@ -450,6 +471,7 @@ mod tests { fn validate_transaction( &self, at: &BlockId, + _source: TransactionSource, uxt: ExtrinsicFor, ) -> Self::ValidationFuture { let hash = self.hash_and_length(&uxt).0; @@ -541,7 +563,7 @@ mod tests { let pool = pool(); // when - let hash = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -565,7 +587,7 @@ mod tests { // when pool.validated_pool.rotator().ban(&Instant::now(), vec![pool.hash_of(&uxt)]); - let res = block_on(pool.submit_one(&BlockId::Number(0), uxt)); + let res = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt)); assert_eq!(pool.validated_pool().status().ready, 0); assert_eq!(pool.validated_pool().status().future, 0); @@ -581,20 +603,20 @@ mod tests { let stream = pool.validated_pool().import_notification_stream(); // when - let _hash = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let _hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, nonce: 0, }))).unwrap(); - let _hash = 
block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let _hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, nonce: 1, }))).unwrap(); // future doesn't count - let _hash = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let _hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -617,19 +639,19 @@ mod tests { fn should_clear_stale_transactions() { // given let pool = pool(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, nonce: 0, }))).unwrap(); - let hash2 = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let hash2 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, nonce: 1, }))).unwrap(); - let hash3 = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let hash3 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -653,7 +675,7 @@ mod tests { fn should_ban_mined_transactions() { // given let pool = pool(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -680,7 +702,7 @@ mod tests { ..Default::default() }, TestApi::default().into()); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -689,7 +711,7 @@ mod tests { assert_eq!(pool.validated_pool().status().future, 1); // when - let hash2 = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let hash2 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(2)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -716,7 +738,7 @@ mod tests { }, TestApi::default().into()); // when - block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -734,7 +756,7 @@ mod tests { let pool = pool(); // when - let err = block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + let err = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -754,7 +776,7 @@ mod tests { fn should_trigger_ready_and_finalized() { // given let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), uxt(Transfer { + let watcher = 
block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -778,7 +800,7 @@ mod tests { fn should_trigger_ready_and_finalized_when_pruning_via_hash() { // given let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), uxt(Transfer { + let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -802,7 +824,7 @@ mod tests { fn should_trigger_future_and_ready_after_promoted() { // given let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), uxt(Transfer { + let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -812,7 +834,7 @@ mod tests { assert_eq!(pool.validated_pool().status().future, 1); // when - block_on(pool.submit_one(&BlockId::Number(0), uxt(Transfer { + block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { from: AccountId::from_h256(H256::from_low_u64_be(1)), to: AccountId::from_h256(H256::from_low_u64_be(2)), amount: 5, @@ -836,7 +858,7 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), uxt)).unwrap(); + let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -860,7 +882,7 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), uxt)).unwrap(); + let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -895,7 +917,7 @@ mod tests { amount: 5, nonce: 0, }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), xt)).unwrap(); + let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // when @@ -905,7 +927,7 @@ mod tests { amount: 4, nonce: 1, }); - block_on(pool.submit_one(&BlockId::Number(1), xt)).unwrap(); + block_on(pool.submit_one(&BlockId::Number(1), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // then @@ -934,7 +956,7 @@ mod tests { // This transaction should go to future, since we use `nonce: 1` let pool2 = pool.clone(); std::thread::spawn(move || { - block_on(pool2.submit_one(&BlockId::Number(0), xt)).unwrap(); + block_on(pool2.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); ready.send(()).unwrap(); }); @@ -948,7 +970,7 @@ mod tests { }); // The tag the above transaction provides (TestApi is using just nonce as u8) let provides = vec![0_u8]; - block_on(pool.submit_one(&BlockId::Number(0), xt)).unwrap(); + block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); assert_eq!(pool.validated_pool().status().ready, 1); // Now block import happens before the second transaction is able to finish verification. 
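The pool hunks above add a `TransactionSource` argument to every submission path (`submit_at`, `submit_one`, `submit_and_watch`) and record it on each stored transaction so that re-validation after pruning can reuse it. A dependency-free sketch of threading that source through submission; the enum below only loosely mirrors the `sp_runtime` type (`External` and `InBlock` appear in the diff, the rest of the sketch is assumption):

// Sketch types only; not the real pool or runtime definitions.
#[derive(Debug, Clone, Copy, PartialEq)]
enum TransactionSource {
    InBlock,  // part of a block being imported (appears in the diff above)
    Local,    // assumed variant for locally submitted transactions
    External, // received from the network (appears in the diff above)
}

#[derive(Debug)]
struct PoolTransaction {
    data: Vec<u8>,
    source: TransactionSource,
}

#[derive(Default)]
struct Pool {
    ready: Vec<PoolTransaction>,
}

impl Pool {
    // Mirrors the new `submit_one(at, source, xt)` shape: the source travels
    // with the payload so later re-validation can pass it along again.
    fn submit_one(&mut self, source: TransactionSource, data: Vec<u8>) {
        self.ready.push(PoolTransaction { data, source });
    }
}

fn main() {
    let mut pool = Pool::default();
    pool.submit_one(TransactionSource::External, vec![1, 2, 3]);
    assert_eq!(pool.ready[0].source, TransactionSource::External);
    assert_eq!(pool.ready[0].data, vec![1, 2, 3]);
    let _ = TransactionSource::InBlock; // used when re-validating pruned transactions
    let _ = TransactionSource::Local;
}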
diff --git a/client/transaction-pool/graph/src/ready.rs b/client/transaction-pool/graph/src/ready.rs index 23f0d49a93071ec180822bef294888a4a6373127..c856535a6165140844a339d0539aa291236a561f 100644 --- a/client/transaction-pool/graph/src/ready.rs +++ b/client/transaction-pool/graph/src/ready.rs @@ -545,6 +545,7 @@ fn remove_item(vec: &mut Vec, item: &T) { #[cfg(test)] mod tests { use super::*; + use sp_runtime::transaction_validity::TransactionSource as Source; fn tx(id: u8) -> Transaction> { Transaction { @@ -556,6 +557,7 @@ mod tests { requires: vec![vec![1], vec![2]], provides: vec![vec![3], vec![4]], propagate: true, + source: Source::External, } } @@ -656,6 +658,7 @@ mod tests { requires: vec![tx1.provides[0].clone()], provides: vec![], propagate: true, + source: Source::External, }; // when @@ -688,6 +691,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, + source: Source::External, }; import(&mut ready, tx).unwrap(); diff --git a/client/transaction-pool/graph/src/rotator.rs b/client/transaction-pool/graph/src/rotator.rs index 55a9230522e2f7ee19e31cd004184d691ee8fd9e..be96174d1d91696832201bae8d9c6db4d1d0769f 100644 --- a/client/transaction-pool/graph/src/rotator.rs +++ b/client/transaction-pool/graph/src/rotator.rs @@ -100,6 +100,7 @@ impl PoolRotator { #[cfg(test)] mod tests { use super::*; + use sp_runtime::transaction_validity::TransactionSource; type Hash = u64; type Ex = (); @@ -122,6 +123,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, + source: TransactionSource::External, }; (hash, tx) @@ -188,6 +190,7 @@ mod tests { requires: vec![], provides: vec![], propagate: true, + source: TransactionSource::External, } } diff --git a/client/transaction-pool/graph/src/validated_pool.rs b/client/transaction-pool/graph/src/validated_pool.rs index a62822a91858b3d0bc0023832e0fe82a880116c3..2ff2acfe24fc8db886c2639fd8a9d2c346642532 100644 --- a/client/transaction-pool/graph/src/validated_pool.rs +++ b/client/transaction-pool/graph/src/validated_pool.rs @@ -27,15 +27,15 @@ use crate::watcher::Watcher; use serde::Serialize; use log::{debug, warn}; -use futures::channel::mpsc; use parking_lot::{Mutex, RwLock}; use sp_runtime::{ generic::BlockId, traits::{self, SaturatedConversion}, - transaction_validity::{TransactionTag as Tag, ValidTransaction}, + transaction_validity::{TransactionTag as Tag, ValidTransaction, TransactionSource}, }; use sp_transaction_pool::{error, PoolStatus}; use wasm_timer::Instant; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use crate::base_pool::PruneStatus; use crate::pool::{EventStream, Options, ChainApi, ExHash, ExtrinsicFor, TransactionFor}; @@ -58,6 +58,7 @@ impl ValidatedTransaction { pub fn valid_at( at: u64, hash: Hash, + source: TransactionSource, data: Ex, bytes: usize, validity: ValidTransaction, @@ -66,6 +67,7 @@ impl ValidatedTransaction { data, bytes, hash, + source, priority: validity.priority, requires: validity.requires, provides: validity.provides, @@ -93,7 +95,7 @@ pub struct ValidatedPool { ExHash, ExtrinsicFor, >>, - import_notification_sinks: Mutex>>>, + import_notification_sinks: Mutex>>>, rotator: PoolRotator>, } @@ -502,7 +504,7 @@ impl ValidatedPool { /// Consumers of this stream should use the `ready` method to actually get the /// pending transactions in the right order. 
pub fn import_notification_stream(&self) -> EventStream> { - let (sink, stream) = mpsc::unbounded(); + let (sink, stream) = tracing_unbounded("mpsc_import_notifications"); self.import_notification_sinks.lock().push(sink); stream } @@ -545,7 +547,7 @@ impl ValidatedPool { } /// Get an iterator for ready transactions ordered by priority - pub fn ready(&self) -> impl Iterator> { + pub fn ready(&self) -> impl Iterator> + Send { self.pool.read().ready() } diff --git a/client/transaction-pool/graph/src/watcher.rs b/client/transaction-pool/graph/src/watcher.rs index d28f6814e455213a443b583646ee2af6a3abb335..d54cc2718b7cbf40f33633df249d7a8d36da826a 100644 --- a/client/transaction-pool/graph/src/watcher.rs +++ b/client/transaction-pool/graph/src/watcher.rs @@ -16,18 +16,16 @@ //! Extrinsics status updates. -use futures::{ - Stream, - channel::mpsc, -}; +use futures::Stream; use sp_transaction_pool::TransactionStatus; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; /// Extrinsic watcher. /// /// Represents a stream of status updates for particular extrinsic. #[derive(Debug)] pub struct Watcher { - receiver: mpsc::UnboundedReceiver>, + receiver: TracingUnboundedReceiver>, hash: H, } @@ -48,7 +46,7 @@ impl Watcher { /// Sender part of the watcher. Exposed only for testing purposes. #[derive(Debug)] pub struct Sender { - receivers: Vec>>, + receivers: Vec>>, is_finalized: bool, } @@ -64,7 +62,7 @@ impl Default for Sender { impl Sender { /// Add a new watcher to this sender object. pub fn new_watcher(&mut self, hash: H) -> Watcher { - let (tx, receiver) = mpsc::unbounded(); + let (tx, receiver) = tracing_unbounded("mpsc_txpool_watcher"); self.receivers.push(tx); Watcher { receiver, diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index a8888baa4839b46bddd2cb2f2b1486efc324ce33..bf104c8d78191c231b7102e97d31ccdf1237fefa 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -29,10 +29,10 @@ use sc_client_api::{ }; use sp_runtime::{ generic::BlockId, traits::{self, Block as BlockT, BlockIdTo, Header as HeaderT, Hash as HashT}, - transaction_validity::TransactionValidity, + transaction_validity::{TransactionValidity, TransactionSource}, }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; -use sp_api::ProvideRuntimeApi; +use sp_api::{ProvideRuntimeApi, ApiExt}; use crate::error::{self, Error}; @@ -81,6 +81,7 @@ impl sc_transaction_graph::ChainApi for FullChainApi, + source: TransactionSource, uxt: sc_transaction_graph::ExtrinsicFor, ) -> Self::ValidationFuture { let (tx, rx) = oneshot::channel(); @@ -88,8 +89,19 @@ impl sc_transaction_graph::ChainApi for FullChainApi, _>( + &at, |v| v >= 2, + ) + .unwrap_or_default(); + let res = if has_v2 { + runtime_api.validate_transaction(&at, source, uxt) + } else { + #[allow(deprecated)] // old validate_transaction + runtime_api.validate_transaction_before_version_2(&at, uxt) + }; + let res = res.map_err(|e| Error::RuntimeApi(format!("{:?}", e))); if let Err(e) = tx.send(res) { log::warn!("Unable to send a validate transaction result: {:?}", e); } @@ -160,6 +172,7 @@ impl sc_transaction_graph::ChainApi for LightChainApi, + source: TransactionSource, uxt: sc_transaction_graph::ExtrinsicFor, ) -> Self::ValidationFuture { let header_hash = self.client.expect_block_hash_from_id(at); @@ -174,7 +187,7 @@ impl sc_transaction_graph::ChainApi for LightChainApi = Box>> + Send>; + +type ReadyIteratorFor = BoxedReadyIterator, 
sc_transaction_graph::ExtrinsicFor>; + +type PolledIterator = Pin> + Send>>; + /// Basic implementation of transaction pool that can be customized by providing PoolApi. pub struct BasicPool where @@ -54,6 +61,48 @@ pub struct BasicPool api: Arc, revalidation_strategy: Arc>>>, revalidation_queue: Arc>, + ready_poll: Arc, Block>>>, +} + +struct ReadyPoll { + updated_at: NumberFor, + pollers: Vec<(NumberFor, oneshot::Sender)>, +} + +impl Default for ReadyPoll { + fn default() -> Self { + Self { + updated_at: NumberFor::::zero(), + pollers: Default::default(), + } + } +} + +impl ReadyPoll { + fn trigger(&mut self, number: NumberFor, iterator_factory: impl Fn() -> T) { + self.updated_at = number; + + let mut idx = 0; + while idx < self.pollers.len() { + if self.pollers[idx].0 <= number { + let poller_sender = self.pollers.swap_remove(idx); + log::debug!(target: "txpool", "Sending ready signal at block {}", number); + let _ = poller_sender.1.send(iterator_factory()); + } else { + idx += 1; + } + } + } + + fn add(&mut self, number: NumberFor) -> oneshot::Receiver { + let (sender, receiver) = oneshot::channel(); + self.pollers.push((number, sender)); + receiver + } + + fn updated_at(&self) -> NumberFor { + self.updated_at + } } #[cfg(not(target_os = "unknown"))] @@ -102,6 +151,27 @@ impl BasicPool Self::with_revalidation_type(options, pool_api, RevalidationType::Full) } + /// Create new basic transaction pool with provided api, for tests. + #[cfg(test)] + pub fn new_test( + pool_api: Arc, + ) -> (Self, Pin + Send>>, intervalier::BackSignalControl) { + let pool = Arc::new(sc_transaction_graph::Pool::new(Default::default(), pool_api.clone())); + let (revalidation_queue, background_task, notifier) = + revalidation::RevalidationQueue::new_test(pool_api.clone(), pool.clone()); + ( + BasicPool { + api: pool_api, + pool, + revalidation_queue: Arc::new(revalidation_queue), + revalidation_strategy: Arc::new(Mutex::new(RevalidationStrategy::Always)), + ready_poll: Default::default(), + }, + background_task, + notifier, + ) + } + /// Create new basic transaction pool with provided api and custom /// revalidation type. 
pub fn with_revalidation_type( @@ -128,6 +198,7 @@ impl BasicPool RevalidationType::Full => RevalidationStrategy::Always, } )), + ready_poll: Default::default(), }, background_task, ) @@ -152,37 +223,40 @@ impl TransactionPool for BasicPool fn submit_at( &self, at: &BlockId, + source: TransactionSource, xts: Vec>, ) -> PoolFuture, Self::Error>>, Self::Error> { let pool = self.pool.clone(); let at = *at; async move { - pool.submit_at(&at, xts, false).await + pool.submit_at(&at, source, xts, false).await }.boxed() } fn submit_one( &self, at: &BlockId, + source: TransactionSource, xt: TransactionFor, ) -> PoolFuture, Self::Error> { let pool = self.pool.clone(); let at = *at; async move { - pool.submit_one(&at, xt).await + pool.submit_one(&at, source, xt).await }.boxed() } fn submit_and_watch( &self, at: &BlockId, + source: TransactionSource, xt: TransactionFor, ) -> PoolFuture>, Self::Error> { let at = *at; let pool = self.pool.clone(); async move { - pool.submit_and_watch(&at, xt) + pool.submit_and_watch(&at, source, xt) .map(|result| result.map(|watcher| Box::new(watcher.into_stream()) as _)) .await }.boxed() @@ -196,10 +270,6 @@ impl TransactionPool for BasicPool self.pool.validated_pool().status() } - fn ready(&self) -> Box>> { - Box::new(self.pool.validated_pool().ready()) - } - fn import_notification_stream(&self) -> ImportNotificationStream> { self.pool.validated_pool().import_notification_stream() } @@ -215,6 +285,27 @@ impl TransactionPool for BasicPool fn ready_transaction(&self, hash: &TxHash) -> Option> { self.pool.validated_pool().ready_by_hash(hash) } + + fn ready_at(&self, at: NumberFor) -> PolledIterator { + if self.ready_poll.lock().updated_at() >= at { + let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); + return Box::pin(futures::future::ready(iterator)); + } + + Box::pin( + self.ready_poll + .lock() + .add(at) + .map(|received| received.unwrap_or_else(|e| { + log::warn!("Error receiving pending set: {:?}", e); + Box::new(vec![].into_iter()) + })) + ) + } + + fn ready(&self) -> ReadyIteratorFor { + Box::new(self.pool.validated_pool().ready()) + } } #[cfg_attr(test, derive(Debug))] @@ -316,7 +407,7 @@ impl MaintainedTransactionPool for BasicPool let block_number = match api.block_id_to_number(&id) { Ok(Some(number)) => number, _ => { - log::trace!(target: "txqueue", "Skipping chain event - no number for that block {:?}", id); + log::trace!(target: "txpool", "Skipping chain event - no number for that block {:?}", id); return Box::pin(ready(())); } }; @@ -329,6 +420,7 @@ impl MaintainedTransactionPool for BasicPool let revalidation_strategy = self.revalidation_strategy.clone(); let retracted = retracted.clone(); let revalidation_queue = self.revalidation_queue.clone(); + let ready_poll = self.ready_poll.clone(); async move { // We don't query block if we won't prune anything @@ -348,6 +440,10 @@ impl MaintainedTransactionPool for BasicPool } } + let extra_pool = pool.clone(); + // After #5200 lands, this arguably might be moved to the handler of "all blocks notification". 
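            // Editor's note (hedged): the `trigger` call below is what completes the futures
            // handed out by `ready_at` above. Once `maintain` has processed `block_number`,
            // every poller registered for a block number <= `block_number` receives a fresh
            // ready-iterator built by the closure, so callers waiting on `ready_at` can proceed.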
+ ready_poll.lock().trigger(block_number, move || Box::new(extra_pool.validated_pool().ready())); + if next_action.resubmit { let mut resubmit_transactions = Vec::new(); @@ -366,7 +462,14 @@ impl MaintainedTransactionPool for BasicPool resubmit_transactions.extend(block_transactions); } - if let Err(e) = pool.submit_at(&id, resubmit_transactions, true).await { + if let Err(e) = pool.submit_at( + &id, + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TransactionSource::External, + resubmit_transactions, + true + ).await { log::debug!( target: "txpool", "[{:?}] Error re-submitting transactions: {:?}", id, e diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index 4b3f9955ca7a9a954debcaea19f1bf19a3bdff9b..f203bf08a0cf0321d084de6ff7efaf96905f466b 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -22,15 +22,15 @@ use sc_transaction_graph::{ChainApi, Pool, ExHash, NumberFor, ValidatedTransacti use sp_runtime::traits::{Zero, SaturatedConversion}; use sp_runtime::generic::BlockId; use sp_runtime::transaction_validity::TransactionValidityError; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; -use futures::{prelude::*, channel::mpsc, stream::unfold}; +use futures::prelude::*; use std::time::Duration; -use futures_timer::Delay; #[cfg(not(test))] const BACKGROUND_REVALIDATION_INTERVAL: Duration = Duration::from_millis(200); #[cfg(test)] -pub const BACKGROUND_REVALIDATION_INTERVAL: Duration = Duration::from_millis(5); +pub const BACKGROUND_REVALIDATION_INTERVAL: Duration = Duration::from_millis(1); const BACKGROUND_REVALIDATION_BATCH_SIZE: usize = 20; @@ -53,12 +53,6 @@ struct RevalidationWorker { impl Unpin for RevalidationWorker {} -fn interval(duration: Duration) -> impl Stream + Unpin { - unfold((), move |_| { - Delay::new(duration).map(|_| Some(((), ()))) - }).map(drop) -} - /// Revalidate batch of transaction. /// /// Each transaction is validated against chain, and invalid are @@ -78,7 +72,7 @@ async fn batch_revalidate( None => continue, }; - match api.validate_transaction(&BlockId::Number(at), ext.data.clone()).await { + match api.validate_transaction(&BlockId::Number(at), ext.source, ext.data.clone()).await { Ok(Err(TransactionValidityError::Invalid(err))) => { log::debug!(target: "txpool", "[{:?}]: Revalidation: invalid {:?}", ext_hash, err); invalid_hashes.push(ext_hash); @@ -94,6 +88,7 @@ async fn batch_revalidate( ValidatedTransaction::valid_at( at.saturated_into::(), ext_hash, + ext.source, ext.data.clone(), api.hash_and_length(&ext.data).1, validity, @@ -206,20 +201,34 @@ impl RevalidationWorker { /// It does two things: periodically tries to process some transactions /// from the queue and also accepts messages to enqueue some more /// transactions from the pool. - pub async fn run(mut self, from_queue: mpsc::UnboundedReceiver>) { - let interval = interval(BACKGROUND_REVALIDATION_INTERVAL).fuse(); + pub async fn run( + mut self, + from_queue: TracingUnboundedReceiver>, + interval: R, + ) where R: Send, R::Guard: Send + { + let interval = interval.into_stream().fuse(); let from_queue = from_queue.fuse(); futures::pin_mut!(interval, from_queue); let this = &mut self; loop { futures::select! 
{ - _ = interval.next() => { + _guard = interval.next() => { let next_batch = this.prepare_batch(); let batch_len = next_batch.len(); batch_revalidate(this.pool.clone(), this.api.clone(), this.best_block, next_batch).await; + #[cfg(test)] + { + use intervalier::Guard; + // only trigger test events if something was processed + if batch_len == 0 { + _guard.expect("Always some() in tests").skip(); + } + } + if batch_len > 0 || this.len() > 0 { log::debug!( target: "txpool", @@ -253,7 +262,7 @@ impl RevalidationWorker { pub struct RevalidationQueue { pool: Arc>, api: Arc, - background: Option>>, + background: Option>>, } impl RevalidationQueue @@ -269,11 +278,14 @@ where } } - /// New revalidation queue with background worker. - pub fn new_background(api: Arc, pool: Arc>) -> - (Self, Pin + Send>>) + pub fn new_with_interval( + api: Arc, + pool: Arc>, + interval: R, + ) -> (Self, Pin + Send>>) + where R: Send + 'static, R::Guard: Send { - let (to_worker, from_queue) = mpsc::unbounded(); + let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue"); let worker = RevalidationWorker::new(api.clone(), pool.clone()); @@ -284,7 +296,25 @@ where background: Some(to_worker), }; - (queue, worker.run(from_queue).boxed()) + (queue, worker.run(from_queue, interval).boxed()) + } + + /// New revalidation queue with background worker. + pub fn new_background(api: Arc, pool: Arc>) -> + (Self, Pin + Send>>) + { + Self::new_with_interval(api, pool, intervalier::Interval::new(BACKGROUND_REVALIDATION_INTERVAL)) + } + + /// New revalidation queue with background worker and test signal. + #[cfg(test)] + pub fn new_test(api: Arc, pool: Arc>) -> + (Self, Pin + Send>>, intervalier::BackSignalControl) + { + let (interval, notifier) = intervalier::BackSignalInterval::new(BACKGROUND_REVALIDATION_INTERVAL); + let (queue, background) = Self::new_with_interval(api, pool, interval); + + (queue, background, notifier) } /// Queue some transaction for later revalidation. 
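As a usage note (a hedged sketch, not part of the patch): the constructors above let the caller choose the revalidation interval. Production code goes through `new_background`, which uses the default 200 ms `intervalier::Interval`, while tests use `new_test` to drive ticks explicitly through the back-signal control. Illustrative wiring, with `spawner`, `best_block` and `hashes` standing in for whatever the caller has at hand:

    // Build the queue plus its background worker, spawn the worker on an executor,
    // then enqueue transaction hashes to be revalidated at a given block number
    // (the same `revalidate_later` call the test at the end of this file makes).
    let (queue, background) = RevalidationQueue::new_background(api.clone(), pool.clone());
    spawner.spawn_ok(background);
    block_on(queue.revalidate_later(best_block, hashes));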
@@ -312,9 +342,9 @@ where #[cfg(test)] mod tests { - use super::*; use sc_transaction_graph::Pool; + use sp_transaction_pool::TransactionSource; use substrate_test_runtime_transaction_pool::{TestApi, uxt}; use futures::executor::block_on; use substrate_test_runtime_client::{ @@ -334,7 +364,9 @@ mod tests { let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); let uxt = uxt(Alice, 0); - let uxt_hash = block_on(pool.submit_one(&BlockId::number(0), uxt.clone())).expect("Should be valid"); + let uxt_hash = block_on( + pool.submit_one(&BlockId::number(0), TransactionSource::External, uxt.clone()) + ).expect("Should be valid"); block_on(queue.revalidate_later(0, vec![uxt_hash])); @@ -343,4 +375,4 @@ mod tests { // number of ready assert_eq!(pool.validated_pool().status().ready, 1); } -} \ No newline at end of file +} diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index 7494ba2684f1b5d5144f2c8bc4bbce1a7b3a93af..e7021e8ea071287eb2448bba4c24c577c8ad0c36 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -20,25 +20,32 @@ use futures::executor::block_on; use txpool::{self, Pool}; use sp_runtime::{ generic::BlockId, - transaction_validity::ValidTransaction, + transaction_validity::{ValidTransaction, InvalidTransaction, TransactionSource}, }; use substrate_test_runtime_client::{ - runtime::{Block, Hash, Index, Header, Extrinsic}, + runtime::{Block, Hash, Index, Header, Extrinsic, Transfer}, AccountKeyring::*, }; use substrate_test_runtime_transaction_pool::{TestApi, uxt}; -use crate::revalidation::BACKGROUND_REVALIDATION_INTERVAL; +use futures::{prelude::*, task::Poll}; +use codec::Encode; fn pool() -> Pool { Pool::new(Default::default(), TestApi::with_alice_nonce(209).into()) } -fn maintained_pool() -> (BasicPool, futures::executor::ThreadPool) { - let (pool, background_task) = BasicPool::new(Default::default(), std::sync::Arc::new(TestApi::with_alice_nonce(209))); +fn maintained_pool() -> ( + BasicPool, + futures::executor::ThreadPool, + intervalier::BackSignalControl, +) { + let (pool, background_task, notifier) = BasicPool::new_test( + std::sync::Arc::new(TestApi::with_alice_nonce(209)) + ); let thread_pool = futures::executor::ThreadPool::new().unwrap(); - thread_pool.spawn_ok(background_task.expect("basic pool have background task")); - (pool, thread_pool) + thread_pool.spawn_ok(background_task); + (pool, thread_pool, notifier) } fn header(number: u64) -> Header { @@ -51,10 +58,12 @@ fn header(number: u64) -> Header { } } +const SOURCE: TransactionSource = TransactionSource::External; + #[test] fn submission_should_work() { let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![209]); @@ -63,8 +72,8 @@ fn submission_should_work() { #[test] fn multiple_submission_should_work() { let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 209))).unwrap(); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 210))).unwrap(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, 
vec![209, 210]); @@ -73,7 +82,7 @@ fn multiple_submission_should_work() { #[test] fn early_nonce_should_be_culled() { let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 208))).unwrap(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 208))).unwrap(); let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, Vec::::new()); @@ -83,11 +92,11 @@ fn early_nonce_should_be_culled() { fn late_nonce_should_be_queued() { let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 210))).unwrap(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, Vec::::new()); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![209, 210]); } @@ -95,8 +104,8 @@ fn late_nonce_should_be_queued() { #[test] fn prune_tags_should_work() { let pool = pool(); - let hash209 = block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 209))).unwrap(); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 210))).unwrap(); + let hash209 = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, vec![209, 210]); @@ -117,16 +126,16 @@ fn prune_tags_should_work() { fn should_ban_invalid_transactions() { let pool = pool(); let uxt = uxt(Alice, 209); - let hash = block_on(pool.submit_one(&BlockId::number(0), uxt.clone())).unwrap(); + let hash = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap(); pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&BlockId::number(0), uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); // when let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); assert_eq!(pending, Vec::::new()); // then - block_on(pool.submit_one(&BlockId::number(0), uxt.clone())).unwrap_err(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); } #[test] @@ -137,7 +146,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { })); let pool = Pool::new(Default::default(), api.clone()); let xt = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(0), xt.clone())).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); assert_eq!(pool.validated_pool().status().ready, 1); // remove the transaction that just got imported. @@ -150,7 +159,7 @@ fn should_correctly_prune_transactions_providing_more_than_one_tag() { // so now let's insert another transaction that also provides the 155 api.increment_nonce(Alice.into()); let xt = uxt(Alice, 211); - block_on(pool.submit_one(&BlockId::number(2), xt.clone())).expect("2. Imported"); + block_on(pool.submit_one(&BlockId::number(2), SOURCE, xt.clone())).expect("2. 
Imported"); assert_eq!(pool.validated_pool().status().ready, 1); assert_eq!(pool.validated_pool().status().future, 1); let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); @@ -186,9 +195,9 @@ fn block_event_with_retracted(id: u64, retracted: Vec) -> ChainEvent>(), @@ -395,11 +402,11 @@ fn should_push_watchers_during_maintaince() { #[test] fn can_track_heap_size() { - let (pool, _guard) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 209))).expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 210))).expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 211))).expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(0), uxt(Alice, 212))).expect("1. Imported"); + let (pool, _guard, _notifier) = maintained_pool(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 211))).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 212))).expect("1. Imported"); assert!(parity_util_mem::malloc_size(&pool) > 3000); } @@ -410,7 +417,9 @@ fn finalization() { let api = TestApi::with_alice_nonce(209); api.push_block(1, vec![]); let (pool, _background) = BasicPool::new(Default::default(), api.into()); - let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), xt.clone())).expect("1. Imported"); + let watcher = block_on( + pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) + ).expect("1. Imported"); pool.api.push_block(2, vec![xt.clone()]); let header = pool.api.chain().read().header_by_number.get(&2).cloned().unwrap(); @@ -460,7 +469,9 @@ fn fork_aware_finalization() { // block B1 { - let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), from_alice.clone())).expect("1. Imported"); + let watcher = block_on( + pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()) + ).expect("1. Imported"); let header = pool.api.push_block(2, vec![from_alice.clone()]); canon_watchers.push((watcher, header.hash())); assert_eq!(pool.status().ready, 1); @@ -481,8 +492,9 @@ fn fork_aware_finalization() { // block C2 { let header = pool.api.push_fork_block_with_parent(b1, vec![from_dave.clone()]); - from_dave_watcher = block_on(pool.submit_and_watch(&BlockId::number(1), from_dave.clone())) - .expect("1. Imported"); + from_dave_watcher = block_on( + pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone()) + ).expect("1. Imported"); assert_eq!(pool.status().ready, 1); let event = ChainEvent::NewBlock { id: BlockId::Hash(header.hash()), @@ -497,7 +509,9 @@ fn fork_aware_finalization() { // block D2 { - from_bob_watcher = block_on(pool.submit_and_watch(&BlockId::number(1), from_bob.clone())).expect("1. Imported"); + from_bob_watcher = block_on( + pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()) + ).expect("1. 
Imported"); assert_eq!(pool.status().ready, 1); let header = pool.api.push_fork_block_with_parent(c2, vec![from_bob.clone()]); @@ -514,7 +528,9 @@ fn fork_aware_finalization() { // block C1 { - let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), from_charlie.clone())).expect("1.Imported"); + let watcher = block_on( + pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone()) + ).expect("1.Imported"); assert_eq!(pool.status().ready, 1); let header = pool.api.push_block(3, vec![from_charlie.clone()]); @@ -534,7 +550,9 @@ fn fork_aware_finalization() { // block D1 { let xt = uxt(Eve, 0); - let w = block_on(pool.submit_and_watch(&BlockId::number(1), xt.clone())).expect("1. Imported"); + let w = block_on( + pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) + ).expect("1. Imported"); assert_eq!(pool.status().ready, 3); let header = pool.api.push_block(4, vec![xt.clone()]); canon_watchers.push((w, header.hash())); @@ -600,5 +618,89 @@ fn fork_aware_finalization() { assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); assert_eq!(stream.next(), None); } +} + +#[test] +fn ready_set_should_not_resolve_before_block_update() { + let (pool, _guard, _notifier) = maintained_pool(); + let xt1 = uxt(Alice, 209); + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + + assert!(pool.ready_at(1).now_or_never().is_none()); +} + +#[test] +fn ready_set_should_resolve_after_block_update() { + let (pool, _guard, _notifier) = maintained_pool(); + pool.api.push_block(1, vec![]); + + let xt1 = uxt(Alice, 209); + + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.maintain(block_event(1))); + + assert!(pool.ready_at(1).now_or_never().is_some()); +} + +#[test] +fn ready_set_should_eventually_resolve_when_block_update_arrives() { + let (pool, _guard, _notifier) = maintained_pool(); + pool.api.push_block(1, vec![]); + + let xt1 = uxt(Alice, 209); + + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. 
Imported"); + + let noop_waker = futures::task::noop_waker(); + let mut context = futures::task::Context::from_waker(&noop_waker); + + let mut ready_set_future = pool.ready_at(1); + if let Poll::Ready(_) = ready_set_future.poll_unpin(&mut context) { + panic!("Ready set should not be ready before block update!"); + } + block_on(pool.maintain(block_event(1))); + + match ready_set_future.poll_unpin(&mut context) { + Poll::Pending => { + panic!("Ready set should become ready after block update!"); + }, + Poll::Ready(iterator) => { + let data = iterator.collect::>(); + assert_eq!(data.len(), 1); + } + } +} + +#[test] +fn should_not_accept_old_signatures() { + use std::convert::TryFrom; + + let client = Arc::new(substrate_test_runtime_client::new()); + let pool = Arc::new( + BasicPool::new(Default::default(), Arc::new(FullChainApi::new(client))).0 + ); + + let transfer = Transfer { + from: Alice.into(), + to: Bob.into(), + nonce: 0, + amount: 1, + }; + let _bytes: sp_core::sr25519::Signature = transfer.using_encoded(|e| Alice.sign(e)).into(); + + // generated with schnorrkel 0.1.1 from `_bytes` + let old_singature = sp_core::sr25519::Signature::try_from(&hex::decode( + "c427eb672e8c441c86d31f1a81b22b43102058e9ce237cabe9897ea5099ffd426cd1c6a1f4f2869c3df57901d36bedcb295657adb3a4355add86ed234eb83108" + ).expect("hex invalid")[..]).expect("signature construction failed"); + + let xt = Extrinsic::Transfer { transfer, signature: old_singature, exhaust_resources_when_not_first: false }; + + assert_matches::assert_matches!( + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())), + Err(error::Error::Pool( + sp_transaction_pool::error::Error::InvalidTransaction(InvalidTransaction::BadProof) + )), + "Should be invalid transactiono with bad proof", + ); } diff --git a/docs/CHANGELOG.md b/docs/CHANGELOG.md new file mode 100644 index 0000000000000000000000000000000000000000..49d47d611cac3e3b0acae94b1d5fb2a26d7075e3 --- /dev/null +++ b/docs/CHANGELOG.md @@ -0,0 +1,58 @@ +# Changelog + +The format is based on [Keep a Changelog]. 
+ +[Keep a Changelog]: http://keepachangelog.com/en/1.0.0/ + +## Unreleased + +### Changed + +* `sc_rpc::system::SystemInfo.impl_version` now returns the full version (2.0.0-alpha.2-b950f731c-x86_64-linux-gnu) instead of the short version (1.0.0) (#5271) + +## 2.0.0-alpha.4 -> 2.0.0-alpha.5 + +Runtime +------- + +* pallet-evm: configurable gasometer config (#5320) +* Adds new event phase `Initialization` (#5302) + +## 2.0.0-alpha.3 -> 2.0.0-alpha.4 + +Runtime +------- + +* Move runtime upgrade to `frame-executive` (#5197) +* Split fees and tips between author and treasury independently (#5207) +* Refactor session away from needless double_maps (#5202) +* Remove `secp256k1` from WASM build (#5187) +* Introduce default-setting prime for collective (#5137) +* Adds `vested_transfer` to Vesting pallet (#5029) +* Change extrinsic_count to extrinsic_index in pallet-utility (#5044) + +Client +------ + +* client/finality-grandpa: Add Prometheus metrics to GossipValidator (#5237) +* removes use of sc_client::Client from node-transaction-factory (#5158) +* removes use of sc_client::Client from sc_network (#5147) +* Use CLI to configure max instances cache (#5177) +* client/service/src/builder.rs: Add build_info metric (#5192) +* Remove substrate-ui.parity.io from CORS whitelist (#5142) +* removes use of sc_client::Client from sc-rpc (#5063) +* Use 128mb for db cache default (#5134) +* Drop db-cache default from 1gig to 32mb (#5128) +* Add more metrics to prometheus (#5034) + +API +--- + +* Produce block always on updated transaction pool state (#5227) +* Add `ext_terminate` (#5234) +* Add ext_transfer call (#5169) +* ChainSpec trait (#5185) +* client/authority-discovery: Instrument code with Prometheus (#5195) +* Don't include `:code` by default in storage proofs (#5179) +* client/network-gossip: Merge GossipEngine and GossipEngineInner (#5042) +* Introduce `on_runtime_upgrade` (#5058) diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 701df299a35453ce71a037150d46d329847d5f80..d1eb924d0d3c11c4b512072ee841c80b62a89d7a 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -20,15 +20,22 @@ # Wasm execution and the wasm side of Substrate Runtime Interface /client/executor/ @pepyakin -/primitives/io/ @pepyakin +/primitives/io/ @pepyakin @NikVolf + +# Crypto, execution extensions, etc. 
+/primitives/core/ @NikVolf + +# Block production +/primitives/authorship/ @NikVolf +/client/basic-authorship/ @NikVolf # Sandboxing capability of Substrate Runtime /primitives/sr-sandbox/ @pepyakin /primitives/core/src/sandbox.rs @pepyakin # Transaction pool -/client/transaction-pool/ @tomusdrw -/primitives/transaction-pool/ @tomusdrw +/client/transaction-pool/ @NikVolf +/primitives/transaction-pool/ @NikVolf # Offchain /client/offchain/ @tomusdrw @@ -59,9 +66,14 @@ # Inflation points /frame/staking/src/inflation.rs @thiolliere -# NPoS and Governance +# NPoS and Governance and Phragmén /frame/staking/ @kianenigma /frame/elections/ @kianenigma +/frame/elections-phragmen/ @kianenigma +/primitives/phragmen/ @kianenigma + +# Fixed point arithmetic +/primitives/sp-arithmetic/ @kianenigma @thiolliere # End to end testing of substrate node /bin/node/executor/ @kianenigma @@ -78,3 +90,7 @@ # Prometheus endpoint /utils/prometheus/ @mxinden + +# CLI API +/client/cli @cecton +/client/cli-derive @cecton diff --git a/docs/CONTRIBUTING.adoc b/docs/CONTRIBUTING.adoc index cdd9809fff864cdb1b26321d76567ac0152233e5..b573aef50d874b304f3b135b4cffd08811fcfdea 100644 --- a/docs/CONTRIBUTING.adoc +++ b/docs/CONTRIBUTING.adoc @@ -25,9 +25,10 @@ Merging pull requests once CI is successful: - it does not alter any logic (e.g. comments, dependencies, docs), then it may be tagged https://github.com/paritytech/substrate/pulls?utf8=%E2%9C%93&q=is%3Apr+is%3Aopen+label%3AA2-insubstantial[`insubstantial`] and merged by its author once CI is complete. - it is an urgent fix with no large change to logic, then it may be merged after a non-author contributor has approved the review once CI is complete. -. Once a PR is ready for review please add the https://github.com/paritytech/substrate/pulls?q=is%3Apr+is%3Aopen+label%3AA0-pleasereview[`pleasereview`] label. Generally PRs should sit with this label for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. -. If the first review is not an approval, swap `A0-pleasereview` to any label `[A3, A7]` to indicate that the PR has received some feedback, but needs further work. For example. https://github.com/paritytech/substrate/labels/A3-inprogress[`A3-inprogress`] is a general indicator that the PR is work in progress and https://github.com/paritytech/substrate/labels/A4-gotissues[`A4-gotissues`] means that it has significant problems that need fixing. Once the work is done, change the label back to `A0-pleasereview`. You might end up swapping a few times back and forth to climb up the A label group. Once a PR is https://github.com/paritytech/substrate/labels/A8-looksgood[`A8-looksgood`], it is ready to merge. -. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/B2-breaksapi[`breaksapi`], when it changes the FRAME or consensus of running system with https://github.com/paritytech/substrate/labels/B3-breaksconsensus[`breaksconsensus`] +. Once a PR is ready for review please add the https://github.com/paritytech/substrate/pulls?q=is%3Apr+is%3Aopen+label%3AA0-pleasereview[`A0-pleasereview`] label. Generally PRs should sit with this label for 48 hours in order to garner feedback. It may be merged before if all relevant parties had a look at it. +. If the first review is not an approval, swap `A0-pleasereview` to any label `[A3, A7]` to indicate that the PR has received some feedback, but needs further work. For example. 
https://github.com/paritytech/substrate/labels/A3-inprogress[`A3-inprogress`] is a general indicator that the PR is work in progress and https://github.com/paritytech/substrate/labels/A4-gotissues[`A4-gotissues`] means that it has significant problems that need fixing. Once the work is done, change the label back to `A0-pleasereview`. You might end up swapping a few times back and forth to climb up the A label group. Once a PR is https://github.com/paritytech/substrate/labels/A8-mergeoncegreen[`A8-mergeoncegreen`], it is ready to merge. +. PRs must be tagged with respect to _release notes_ with https://github.com/paritytech/substrate/labels/B0-silent[`B0-silent`] and `B1-..`. The former indicates that no changes should be mentioned in any release notes. The latter indicates that the changes should be reported in the corresponding release note. +. PRs that break the external API must be tagged with https://github.com/paritytech/substrate/labels/B2-breaksapi[`B2-breaksapi`]; when they change the FRAME or consensus of a running system, tag them with https://github.com/paritytech/substrate/labels/B3-breaksconsensus[`B3-breaksconsensus`] . No PR should be merged until all reviews' comments are addressed. *Reviewing pull requests*: @@ -48,16 +49,19 @@ When reviewing a pull request, the end-goal is to suggest useful changes to the === Updating Polkadot as well -If your PR changes the external APIs or interfaces used by Polkadot, **a corresponding PR on Polkadot must** be submitted as well. If you tagged the PR with `breaksapi` or `breaksconsensus` this is most certainly the case, in all other cases check for it by running step 1. +**All pull requests will be checked against either Polkadot master or your provided Polkadot companion PR**. That is, a companion PR is required if your PR changes the external APIs or interfaces used by Polkadot. If you tagged the PR with `breaksapi` or `breaksconsensus` this is most certainly the case; in all other cases check for it by running step 1 below. -To update a corresponding Polkadot PR: -0. Pull latest Polkadot master (or clone it, if you haven't yet). -1. Replace `polkadot-master` in all `Cargo.toml` with the name of the PR-branch - e.g. by running `find . -name "Cargo.toml" -exec sed -i "s/polkadot-master/PR_BRANCH/g" {}` (and to your repo, if the branch is not on mainline); Commit this change. -2. Make the changes required to pass the build again. -3. Submit all this as a PR against the Polkadot Repo, link that new PR in the existing substrate PR for reference -4. Wait for reviews on both -5. Once both PRs have been green lit, merge the Substrate PR into master and sync the changes onto the `polkadot-master` on mainline (push it). Now revert that first commit in your Polkadot PR, reverting the name back to `polkadot-master`, push and wait for the CI to confirm all is fine. Then merge the Polkadot PR. +To create a Polkadot companion PR: +. Pull latest Polkadot master (or clone it, if you haven't yet). +. Override your local cargo config to point to your local Substrate checkout (pointing to your WIP branch): place `paths = ["path/to/substrate"]` in `~/.cargo/config`. +. Make the changes required and build Polkadot locally. +. Submit all this as a PR against the Polkadot repo. Link to your Polkadot PR in the _description_ of your Substrate PR as "polkadot companion: [URL]" OR use the same name for your Polkadot branch as the Substrate branch. +. Now you should see that the `check_polkadot` CI job will build your Substrate PR against the mentioned Polkadot branch in your PR description. +.
Wait for reviews on both . Once both PRs have been green lit, they can both be merged 🍻. + +If your PR is reviewed well, but a Polkadot PR is missing, signal it with https://github.com/paritytech/substrate/labels/A7-needspolkadotpr[`A7-needspolkadotpr`] to prevent it from getting automatically merged. As there might be multiple pending PRs that conflict with one another, a) you should not merge the Substrate PR until the Polkadot PR has also been reviewed, and b) both should be merged shortly after one another so as not to block others. diff --git a/docs/README.adoc b/docs/README.adoc index 8d762fee05f8789f164b0c5a263e334c28668176..51e7748b67c094194e8b1a810fe92e8cfe4d32d8 100644 --- a/docs/README.adoc +++ b/docs/README.adoc @@ -291,7 +291,7 @@ cargo run --release \-- \ --chain=local \ --alice \ --node-key 0000000000000000000000000000000000000000000000000000000000000001 \ - --telemetry-url ws://telemetry.polkadot.io:1024 \ + --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ --validator In the second terminal, we'll run the following to start Bob's Substrate node on a different TCP port of 30334, and with his chain database stored locally at `/tmp/bob`. We'll specify a value for the `--bootnodes` option that will connect his node to Alice's Bootnode ID on TCP port 30333: @@ -303,7 +303,7 @@ cargo run --release \-- \ --chain=local \ --bob \ --port 30334 \ - --telemetry-url ws://telemetry.polkadot.io:1024 \ + --telemetry-url 'ws://telemetry.polkadot.io:1024 0' \ --validator Additional Substrate CLI usage options are available and may be shown by running `cargo run \-- --help`. diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 5b0b5f096a91fdfea93956119879c8873d714de9..8b242ff0e8ef149e47b86365ba14acf7abe9cc0a 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-assets" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,18 +10,18 @@ description = "FRAME asset management pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } # `system` module provides us with all sorts of useful stuff and macros depend on it being around.
-frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", path = "../../primitives/io" } [features] default = ["std"] @@ -32,3 +32,6 @@ std = [ "frame-support/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 042ff89913417ac710a20f348662ea6a2d10eceb..388eb7780bd6dc7df7525d9e015c20790b0c4594 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -157,6 +157,7 @@ decl_module! { /// Issue a new class of fungible assets. There are, and will only ever be, `total` /// such assets and they'll all belong to the `origin` initially. It will have an /// identifier `AssetId` instance: this will be specified in the `Issued` event. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn issue(origin, #[compact] total: T::Balance) { let origin = ensure_signed(origin)?; @@ -170,6 +171,7 @@ decl_module! { } /// Move some assets from one holder to another. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn transfer(origin, #[compact] id: T::AssetId, target: ::Source, @@ -188,6 +190,7 @@ decl_module! { } /// Destroy any assets of `id` owned by `origin`. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn destroy(origin, #[compact] id: T::AssetId) { let origin = ensure_signed(origin)?; let balance = >::take((id, &origin)); @@ -228,11 +231,11 @@ decl_error! { decl_storage! { trait Store for Module as Assets { /// The number of units of assets held by any given account. - Balances: map hasher(blake2_256) (T::AssetId, T::AccountId) => T::Balance; + Balances: map hasher(blake2_128_concat) (T::AssetId, T::AccountId) => T::Balance; /// The next asset identifier up for grabs. NextAssetId get(fn next_asset_id): T::AssetId; /// The total unit supply of an asset. 
- TotalSupply: map hasher(blake2_256) T::AssetId => T::Balance; + TotalSupply: map hasher(twox_64_concat) T::AssetId => T::Balance; } } diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index 77abfe4ae1f48aa062c50d4d516e62dd4042631d..36e79116618e35c296590c96a1666e03bd349784 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-aura" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,20 +9,20 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME AURA consensus pallet" [dependencies] -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false, version = "0.8.0-alpha.2"} -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/timestamp" } -pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } +pallet-session = { version = "2.0.0-alpha.5", default-features = false, path = "../session" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.5"} +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false, version = "0.8.0-alpha.5"} +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/timestamp" } +pallet-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../timestamp" } [dev-dependencies] @@ -46,3 +46,6 @@ std = [ "sp-timestamp/std", "pallet-timestamp/std", ] + +[package.metadata.docs.rs] +targets = 
["x86_64-unknown-linux-gnu"] diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index c82d1eab588b39fcf27f5924ca5471a8b4b616fd..f67d4ee038ab374a5cd2c75c947f141fc58fdde2 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authority-discovery" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,20 +9,20 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for authority discovery" [dependencies] -sp-authority-discovery = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/authority-discovery" } -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-authority-discovery = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/authority-discovery" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -pallet-session = { version = "2.0.0-alpha.2", features = ["historical" ], path = "../session", default-features = false } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +pallet-session = { version = "2.0.0-alpha.5", features = ["historical" ], path = "../session", default-features = false } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } +sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/staking" } [features] default = ["std"] @@ -39,3 +39,6 @@ std = [ "frame-support/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 8ee4931e488c3740174706480188dfbbdd5b1857..b8f28b432ba75f14594ee50e9b6fbfeb0ee73312 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -121,6 +121,7 @@ mod tests { type ValidatorId = 
AuthorityId; type ValidatorIdOf = ConvertInto; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = pallet_session::PeriodicSessions; } impl pallet_session::historical::Trait for Test { diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index 2a01cdbd8ad85b22245bf68b8505b4c8be846d6c..fb966113d55ee67fda4dcf946487f82a9185d6a6 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-authorship" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" description = "Block and Uncle Author tracking for the FRAME" authors = ["Parity Technologies "] edition = "2018" @@ -9,15 +9,15 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -sp-authorship = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/authorship" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +sp-authorship = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/authorship" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.5"} impl-trait-for-tuples = "0.1.3" [features] @@ -33,3 +33,6 @@ std = [ "sp-io/std", "sp-authorship/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index d3c1bf752aeb8a197333946e3d4d4f8a751b765f..e6249849bf40ce08844e2a562832b984af9ef7ec 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -27,7 +27,7 @@ use frame_support::traits::{FindAuthor, VerifySeal, Get}; use codec::{Encode, Decode}; use frame_system::ensure_none; use sp_runtime::traits::{Header as HeaderT, One, Zero}; -use frame_support::weights::SimpleDispatchInfo; +use frame_support::weights::{Weight, SimpleDispatchInfo, WeighData}; use sp_inherents::{InherentIdentifier, ProvideInherent, InherentData}; use sp_authorship::{INHERENT_IDENTIFIER, UnclesInherentData, InherentError}; @@ -185,7 +185,7 @@ decl_module! 
{ pub struct Module for enum Call where origin: T::Origin { type Error = Error; - fn on_initialize(now: T::BlockNumber) { + fn on_initialize(now: T::BlockNumber) -> Weight { let uncle_generations = T::UncleGenerations::get(); // prune uncles that are older than the allowed number of generations. if uncle_generations <= now { @@ -196,6 +196,8 @@ decl_module! { ::DidSetUncles::put(false); T::EventHandler::note_author(Self::author()); + + SimpleDispatchInfo::default().weigh_data(()) } fn on_finalize() { @@ -205,7 +207,7 @@ decl_module! { } /// Provide a set of uncles. - #[weight = SimpleDispatchInfo::FixedOperational(10_000)] + #[weight = SimpleDispatchInfo::FixedMandatory(10_000)] fn set_uncles(origin, new_uncles: Vec) -> dispatch::DispatchResult { ensure_none(origin)?; ensure!(new_uncles.len() <= MAX_UNCLES, Error::::TooManyUncles); diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 7e39e5bed5c29a607ef3faf077ab1964d3921c57..8f885500224053f5ccb8ecb80615b468a34bc514 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-babe" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,27 +9,23 @@ repository = "https://github.com/paritytech/substrate/" description = "Consensus extension module for BABE consensus. Collects on-chain randomness from VRF outputs and manages epoch transitions." [dependencies] -hex-literal = "0.2.1" -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } -sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/timestamp" } -pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } -sp-consensus-babe = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/consensus/babe" } -sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = 
"../timestamp" } +sp-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/timestamp" } +pallet-session = { version = "2.0.0-alpha.5", default-features = false, path = "../session" } +sp-consensus-babe = { version = "0.8.0-alpha.5", default-features = false, path = "../../primitives/consensus/babe" } +sp-consensus-vrf = { version = "0.8.0-alpha.5", default-features = false, path = "../../primitives/consensus/vrf" } +sp-io = { path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.5"} [dev-dependencies] -lazy_static = "1.4.0" -parking_lot = "0.10.0" -sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/version" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -substrate-test-runtime = { version = "2.0.0-dev", path = "../../test-utils/runtime" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } [features] default = ["std"] @@ -45,6 +41,10 @@ std = [ "sp-timestamp/std", "sp-inherents/std", "sp-consensus-babe/std", + "sp-consensus-vrf/std", "pallet-session/std", "sp-io/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index 4dc9304fa8467f7e2bf36dcda500fb8fda37962e..5f85b910880670e646eb7c903688f754fcefa1b3 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -19,14 +19,17 @@ #![cfg_attr(not(feature = "std"), no_std)] #![forbid(unused_must_use, unsafe_code, unused_variables, unused_must_use)] -#![deny(unused_imports)] -pub use pallet_timestamp; + +use pallet_timestamp; use sp_std::{result, prelude::*}; -use frame_support::{decl_storage, decl_module, traits::{FindAuthor, Get, Randomness as RandomnessT}}; +use frame_support::{ + decl_storage, decl_module, traits::{FindAuthor, Get, Randomness as RandomnessT}, + weights::{Weight, SimpleDispatchInfo, WeighData}, +}; use sp_timestamp::OnTimestampSet; -use sp_runtime::{generic::DigestItem, ConsensusEngineId, Perbill, PerThing}; -use sp_runtime::traits::{IsMember, SaturatedConversion, Saturating, Hash}; +use sp_runtime::{generic::DigestItem, ConsensusEngineId, Perbill}; +use sp_runtime::traits::{IsMember, SaturatedConversion, Saturating, Hash, One}; use sp_staking::{ SessionIndex, offence::{Offence, Kind}, @@ -39,7 +42,8 @@ use sp_consensus_babe::{ inherents::{INHERENT_IDENTIFIER, BabeInherentData}, digests::{NextEpochDescriptor, RawPreDigest}, }; -pub use sp_consensus_babe::{AuthorityId, VRF_OUTPUT_LENGTH, PUBLIC_KEY_LENGTH}; +use sp_consensus_vrf::schnorrkel; +pub use sp_consensus_babe::{AuthorityId, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH, PUBLIC_KEY_LENGTH}; #[cfg(all(feature = "std", test))] mod tests; @@ -96,12 +100,9 @@ impl EpochChangeTrigger for SameAuthoritiesForever { } } -/// The length of the BABE randomness -pub const RANDOMNESS_LENGTH: usize = 32; - const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; -type MaybeVrf = Option<[u8; 32 /* VRF_OUTPUT_LENGTH */]>; +type MaybeVrf = Option; decl_storage! { trait Store for Module as Babe { @@ -131,10 +132,10 @@ decl_storage! { // NOTE: the following fields don't use the constants to define the // array size because the metadata API currently doesn't resolve the // variable to its underlying value. - pub Randomness get(fn randomness): [u8; 32 /* RANDOMNESS_LENGTH */]; + pub Randomness get(fn randomness): schnorrkel::Randomness; /// Next epoch randomness. 
- NextRandomness: [u8; 32 /* RANDOMNESS_LENGTH */]; + NextRandomness: schnorrkel::Randomness; /// Randomness under construction. /// @@ -146,11 +147,18 @@ decl_storage! { /// We reset all segments and return to `0` at the beginning of every /// epoch. SegmentIndex build(|_| 0): u32; - UnderConstruction: map hasher(blake2_256) u32 => Vec<[u8; 32 /* VRF_OUTPUT_LENGTH */]>; + UnderConstruction: map hasher(twox_64_concat) u32 => Vec; /// Temporary value (cleared at block finalization) which is `Some` /// if per-block initialization has already been called for current block. Initialized get(fn initialized): Option; + + /// How late the current block is compared to its parent. + /// + /// This entry is populated as part of block execution and is cleaned up + /// on block finalization. Querying this storage entry outside of block + /// execution context should always yield zero. + Lateness get(fn lateness): T::BlockNumber; } add_extra_genesis { config(authorities): Vec<(AuthorityId, BabeAuthorityWeight)>; @@ -173,8 +181,10 @@ decl_module! { const ExpectedBlockTime: T::Moment = T::ExpectedBlockTime::get(); /// Initialization - fn on_initialize(now: T::BlockNumber) { + fn on_initialize(now: T::BlockNumber) -> Weight { Self::do_initialize(now); + + SimpleDispatchInfo::default().weigh_data(()) } /// Block finalization @@ -187,6 +197,9 @@ decl_module! { if let Some(Some(vrf_output)) = Initialized::take() { Self::deposit_vrf_output(&vrf_output); } + + // remove temporary "environment" entry from storage + Lateness::::kill(); } } } @@ -210,13 +223,8 @@ impl FindAuthor for Module { { for (id, mut data) in digests.into_iter() { if id == BABE_ENGINE_ID { - let pre_digest = RawPreDigest::decode(&mut data).ok()?; - return Some(match pre_digest { - RawPreDigest::Primary { authority_index, .. } => - authority_index, - RawPreDigest::Secondary { authority_index, .. } => - authority_index, - }); + let pre_digest: RawPreDigest = RawPreDigest::decode(&mut data).ok()?; + return Some(pre_digest.authority_index()) } } @@ -309,12 +317,34 @@ impl Module { // epoch 0 as having started at the slot of block 1. We want to use // the same randomness and validator set as signalled in the genesis, // so we don't rotate the epoch. - now != sp_runtime::traits::One::one() && { + now != One::one() && { let diff = CurrentSlot::get().saturating_sub(Self::current_epoch_start()); diff >= T::EpochDuration::get() } } + /// Return the _best guess_ block number, at which the next epoch change is predicted to happen. + /// + /// Returns None if the prediction is in the past; This implies an error internally in the Babe + /// and should not happen under normal circumstances. + /// + /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot + /// number will grow while the block number will not. Hence, the result can be interpreted as an + /// upper bound. + // -------------- IMPORTANT NOTE -------------- + // This implementation is linked to how [`should_epoch_change`] is working. This might need to + // be updated accordingly, if the underlying mechanics of slot and epochs change. + pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { + let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); + next_slot + .checked_sub(CurrentSlot::get()) + .map(|slots_remaining| { + // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. 
+ let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }) + } + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` has returned `true`, /// and the caller is the only caller of this function. /// @@ -326,10 +356,7 @@ impl Module { ) { // PRECONDITION: caller has done initialization and is guaranteed // by the session module to be called before this. - #[cfg(debug_assertions)] - { - assert!(Self::initialized().is_some()) - } + debug_assert!(Self::initialized().is_some()); // Update epoch index let epoch_index = EpochIndex::get() @@ -373,7 +400,7 @@ impl Module { >::deposit_log(log.into()) } - fn deposit_vrf_output(vrf_output: &[u8; VRF_OUTPUT_LENGTH]) { + fn deposit_vrf_output(vrf_output: &schnorrkel::RawVRFOutput) { let segment_idx = ::get(); let mut segment = ::get(&segment_idx); if segment.len() < UNDER_CONSTRUCTION_SEGMENT_LENGTH { @@ -383,7 +410,7 @@ impl Module { } else { // move onto the next segment and update the index. let segment_idx = segment_idx + 1; - ::insert(&segment_idx, &vec![*vrf_output]); + ::insert(&segment_idx, &vec![vrf_output.clone()]); ::put(&segment_idx); } } @@ -396,7 +423,7 @@ impl Module { return; } - let maybe_pre_digest = >::digest() + let maybe_pre_digest: Option = >::digest() .logs .iter() .filter_map(|s| s.as_pre_runtime()) @@ -426,13 +453,21 @@ impl Module { Self::deposit_consensus(ConsensusLog::NextEpochData(next)) } - CurrentSlot::put(digest.slot_number()); + // the slot number of the current block being initialized + let current_slot = digest.slot_number(); + + // how many slots were skipped between current and last block + let lateness = current_slot.saturating_sub(CurrentSlot::get() + 1); + let lateness = T::BlockNumber::from(lateness as u32); - if let RawPreDigest::Primary { vrf_output, .. } = digest { + Lateness::::put(lateness); + CurrentSlot::put(current_slot); + + if let RawPreDigest::Primary(primary) = digest { // place the VRF output into the `Initialized` storage item // and it'll be put onto the under-construction randomness // later, once we've decided which epoch this block is in. - Some(vrf_output) + Some(primary.vrf_output) } else { None } @@ -446,7 +481,7 @@ impl Module { /// Call this function exactly once when an epoch changes, to update the /// randomness. Returns the new randomness. - fn randomness_change_epoch(next_epoch_index: u64) -> [u8; RANDOMNESS_LENGTH] { + fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { let this_randomness = NextRandomness::get(); let segment_idx: u32 = ::mutate(|s| sp_std::mem::replace(s, 0)); @@ -475,6 +510,18 @@ impl OnTimestampSet for Module { fn on_timestamp_set(_moment: T::Moment) { } } +impl frame_support::traits::EstimateNextSessionRotation for Module { + fn estimate_next_session_rotation(now: T::BlockNumber) -> Option { + Self::next_expected_epoch_change(now) + } +} + +impl frame_support::traits::Lateness for Module { + fn lateness(&self) -> T::BlockNumber { + Self::lateness() + } +} + impl sp_runtime::BoundToRuntimeAppPublic for Module { type Public = AuthorityId; } @@ -513,11 +560,11 @@ impl pallet_session::OneSessionHandler for Module { // // an optional size hint as to how many VRF outputs there were may be provided. 
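// Illustrative sketch, not part of the patch itself: the arithmetic behind the new
// `Lateness` value and `next_expected_epoch_change`, using plain u64 slots and block
// numbers instead of the pallet's generic types. The free functions and the test
// below are hypothetical; the numbers mirror the `can_predict_next_epoch_change`
// test added to frame/babe/src/tests.rs further down.
fn lateness(parent_slot: u64, current_slot: u64) -> u64 {
	// Slots skipped between the parent block and the current one; zero when the
	// slots are consecutive (current == parent + 1).
	current_slot.saturating_sub(parent_slot + 1)
}

fn next_expected_epoch_change(
	epoch_start_slot: u64,
	epoch_duration: u64,
	current_slot: u64,
	current_block: u64,
) -> Option<u64> {
	// Best-effort guess: assume one block per remaining slot, so the result is an
	// upper bound whenever slots are missed.
	let next_epoch_slot = epoch_start_slot.saturating_add(epoch_duration);
	next_epoch_slot
		.checked_sub(current_slot)
		.map(|slots_remaining| current_block.saturating_add(slots_remaining))
}

#[test]
fn epoch_change_prediction_sketch() {
	// EpochDuration = 3, epoch started at slot 9, current slot 10, block 5:
	// two slots remain, so the change is predicted at block 5 + 2 = 7.
	assert_eq!(next_expected_epoch_change(9, 3, 10, 5), Some(7));
	// Parent claimed slot 10, current block claims slot 13: two slots skipped.
	assert_eq!(lateness(10, 13), 2);
}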
fn compute_randomness( - last_epoch_randomness: [u8; RANDOMNESS_LENGTH], + last_epoch_randomness: schnorrkel::Randomness, epoch_index: u64, - rho: impl Iterator, + rho: impl Iterator, rho_size_hint: Option, -) -> [u8; RANDOMNESS_LENGTH] { +) -> schnorrkel::Randomness { let mut s = Vec::with_capacity(40 + rho_size_hint.unwrap_or(0) * VRF_OUTPUT_LENGTH); s.extend_from_slice(&last_epoch_randomness); s.extend_from_slice(&epoch_index.to_le_bytes()); diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 2ec083728e82ceb8488fa9c24efb7223ae5f450e..ea802b268e399c6f71577507df49b60288e880f5 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -16,14 +16,22 @@ //! Test utilities -use super::{Trait, Module, GenesisConfig}; +use codec::Encode; +use super::{Trait, Module, GenesisConfig, CurrentSlot}; use sp_runtime::{ - traits::IdentityLookup, Perbill, testing::{Header, UintAuthorityId}, impl_opaque_keys, + Perbill, impl_opaque_keys, + testing::{Header, UintAuthorityId, Digest, DigestItem}, + traits::IdentityLookup, +}; +use frame_system::InitKind; +use frame_support::{ + impl_outer_origin, parameter_types, StorageValue, + traits::OnInitialize, + weights::Weight, }; -use sp_version::RuntimeVersion; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; use sp_io; use sp_core::H256; +use sp_consensus_vrf::schnorrkel::{RawVRFOutput, RawVRFProof}; impl_outer_origin!{ pub enum Origin for Test where system = frame_system {} @@ -43,7 +51,6 @@ parameter_types! { pub const MinimumPeriod: u64 = 1; pub const EpochDuration: u64 = 3; pub const ExpectedBlockTime: u64 = 1; - pub const Version: RuntimeVersion = substrate_test_runtime::VERSION; pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(16); } @@ -53,7 +60,7 @@ impl frame_system::Trait for Test { type BlockNumber = u64; type Call = (); type Hash = H256; - type Version = Version; + type Version = (); type Hashing = sp_runtime::traits::BlakeTwo256; type AccountId = DummyValidatorId; type Lookup = IdentityLookup; @@ -79,11 +86,12 @@ impl pallet_session::Trait for Test { type Event = (); type ValidatorId = ::AccountId; type ShouldEndSession = Babe; - type SessionHandler = (Babe,Babe,); + type SessionHandler = (Babe,); type SessionManager = (); type ValidatorIdOf = (); type Keys = MockSessionKeys; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = Babe; } impl pallet_timestamp::Trait for Test { @@ -106,5 +114,44 @@ pub fn new_test_ext(authorities: Vec) -> sp_io::TestExternalit t.into() } +pub fn go_to_block(n: u64, s: u64) { + let pre_digest = make_pre_digest(0, s, RawVRFOutput([1; 32]), RawVRFProof([0xff; 64])); + System::initialize(&n, &Default::default(), &Default::default(), &pre_digest, InitKind::Full); + System::set_block_number(n); + if s > 1 { + CurrentSlot::put(s); + } + // includes a call into `Babe::do_initialize`. 
+ Session::on_initialize(n); +} + +/// Slots will grow accordingly to blocks +pub fn progress_to_block(n: u64) { + let mut slot = Babe::current_slot() + 1; + for i in System::block_number()+1..=n { + go_to_block(i, slot); + slot += 1; + } +} + +pub fn make_pre_digest( + authority_index: sp_consensus_babe::AuthorityIndex, + slot_number: sp_consensus_babe::SlotNumber, + vrf_output: RawVRFOutput, + vrf_proof: RawVRFProof, +) -> Digest { + let digest_data = sp_consensus_babe::digests::RawPreDigest::Primary( + sp_consensus_babe::digests::RawPrimaryPreDigest { + authority_index, + slot_number, + vrf_output, + vrf_proof, + } + ); + let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); + Digest { logs: vec![log] } +} + pub type System = frame_system::Module; pub type Babe = Module; +pub type Session = pallet_session::Module; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 84f8166b10431a9d107cf82d831d4aaa3230dfe1..24aba100178e058112600873602218581613c3b3 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -17,9 +17,10 @@ //! Consensus extension module tests for BABE consensus. use super::*; -use mock::{new_test_ext, Babe, System}; -use sp_runtime::{traits::OnFinalize, testing::{Digest, DigestItem}}; +use mock::*; +use frame_support::traits::OnFinalize; use pallet_session::ShouldEndSession; +use sp_consensus_vrf::schnorrkel::{RawVRFOutput, RawVRFProof}; const EMPTY_RANDOMNESS: [u8; 32] = [ 74, 25, 49, 128, 53, 97, 244, 49, @@ -28,22 +29,6 @@ const EMPTY_RANDOMNESS: [u8; 32] = [ 217, 153, 138, 37, 48, 192, 248, 0, ]; -fn make_pre_digest( - authority_index: sp_consensus_babe::AuthorityIndex, - slot_number: sp_consensus_babe::SlotNumber, - vrf_output: [u8; sp_consensus_babe::VRF_OUTPUT_LENGTH], - vrf_proof: [u8; sp_consensus_babe::VRF_PROOF_LENGTH], -) -> Digest { - let digest_data = sp_consensus_babe::digests::RawPreDigest::Primary { - authority_index, - slot_number, - vrf_output, - vrf_proof, - }; - let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); - Digest { logs: vec![log] } -} - #[test] fn empty_randomness_is_correct() { let s = compute_randomness([0; RANDOMNESS_LENGTH], 0, std::iter::empty(), None); @@ -70,12 +55,12 @@ fn check_module() { fn first_block_epoch_zero_start() { new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { let genesis_slot = 100; - let first_vrf = [1; 32]; + let first_vrf = RawVRFOutput([1; 32]); let pre_digest = make_pre_digest( 0, genesis_slot, - first_vrf, - [0xff; 64], + first_vrf.clone(), + RawVRFProof([0xff; 64]), ); assert_eq!(Babe::genesis_slot(), 0); @@ -128,3 +113,24 @@ fn authority_index() { "Trivially invalid authorities are ignored") }) } + +#[test] +fn can_predict_next_epoch_change() { + new_test_ext(vec![]).execute_with(|| { + assert_eq!(::EpochDuration::get(), 3); + // this sets the genesis slot to 6; + go_to_block(1, 6); + assert_eq!(Babe::genesis_slot(), 6); + assert_eq!(Babe::current_slot(), 6); + assert_eq!(Babe::epoch_index(), 0); + + progress_to_block(5); + + assert_eq!(Babe::epoch_index(), 5 / 3); + assert_eq!(Babe::current_slot(), 10); + + // next epoch change will be at + assert_eq!(Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now + assert_eq!(Babe::next_expected_epoch_change(System::block_number()), Some(5 + 2)); + }) +} diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index 6bb551c9c6fea03a747e700e5aac6f0d8156f300..f8a59ad15476fbd0c4f69fc418c17c67be3a7506 100644 --- a/frame/balances/Cargo.toml 
+++ b/frame/balances/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-balances" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,17 +10,17 @@ description = "FRAME pallet to manage balances" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../transaction-payment" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +pallet-transaction-payment = { version = "2.0.0-alpha.5", path = "../transaction-payment" } [features] default = ["std"] @@ -35,3 +35,6 @@ std = [ "frame-system/std", ] runtime-benchmarks = ["frame-benchmarking"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 2473ce292004754574a96be0d07c3d2af3c038a5..3c2067559fcf21dcae48356f0acf00685c7844f2 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -16,6 +16,8 @@ //! Balances pallet benchmarking. +#![cfg(feature = "runtime-benchmarks")] + use super::*; use frame_system::RawOrigin; @@ -49,10 +51,13 @@ benchmarks! { let _ = as Currency<_>>::make_free_balance_be(&caller, balance); // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. - let recipient = account("recipient", u, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient); + let recipient: T::AccountId = account("recipient", u, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); let transfer_amount = existential_deposit.saturating_mul((e - 1).into()) + 1.into(); }: _(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) + verify { + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + } // Benchmark `transfer` with the best possible condition: // * Both accounts exist and will continue to exist. 
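// Worked example for the reap-and-create `transfer` benchmark above (illustrative;
// the caller's exact starting balance is set earlier in the benchmark and is not
// shown in this hunk — assume it is `e` existential deposits): with ED = 100 and
// e = 3 the transfer amount is 100 * (3 - 1) + 1 = 201, so the recipient ends up
// above ED (one new account is created) while the caller, ignoring fees, drops to
// 99 < ED and is reaped.
#[test]
fn transfer_amount_arithmetic_sketch() {
	let existential_deposit: u64 = 100;
	let e: u64 = 3;
	let transfer_amount = existential_deposit * (e - 1) + 1;
	assert_eq!(transfer_amount, 201);
	// The assumed caller balance of e * ED = 300 falls below ED after the transfer.
	assert!(e * existential_deposit - transfer_amount < existential_deposit);
}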
@@ -116,4 +121,46 @@ benchmarks! { let balance_amount = existential_deposit.saturating_mul(e.into()); let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); }: set_balance(RawOrigin::Root, user_lookup, 0.into(), 0.into()) -} \ No newline at end of file +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests_composite::{ExtBuilder, Test}; + use frame_support::assert_ok; + + #[test] + fn transfer() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_transfer::()); + }); + } + + #[test] + fn transfer_best_case() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_transfer_best_case::()); + }); + } + + #[test] + fn transfer_keep_alive() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_transfer_keep_alive::()); + }); + } + + #[test] + fn transfer_set_balance() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_set_balance::()); + }); + } + + #[test] + fn transfer_set_balance_killing() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_set_balance_killing::()); + }); + } +} diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 6f4be24515602f55f1bdbc771f69c1161e418d8a..39e15b3f4f585d38b3d68752b1803782ae0b3619 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -148,14 +148,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(test)] -mod tests_local; -#[cfg(test)] -mod tests_composite; -#[cfg(test)] #[macro_use] mod tests; -#[cfg(feature = "runtime-benchmarks")] +mod tests_local; +mod tests_composite; mod benchmarking; use sp_std::prelude::*; @@ -167,7 +163,7 @@ use frame_support::{ Currency, OnKilledAccount, OnUnbalanced, TryDrop, StoredMap, WithdrawReason, WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::KeepAlive, - ExistenceRequirement::AllowDeath, IsDeadAccount, BalanceStatus as Status + ExistenceRequirement::AllowDeath, IsDeadAccount, BalanceStatus as Status, } }; use sp_runtime::{ @@ -372,11 +368,11 @@ decl_storage! { /// is ever zero, then the entry *MUST* be removed. /// /// NOTE: This is only used in the case that this module is used to store balances. - pub Account: map hasher(blake2_256) T::AccountId => AccountData; + pub Account: map hasher(blake2_128_concat) T::AccountId => AccountData; /// Any liquidity locks on some account balances. /// NOTE: Should only be accessed when setting, changing and freeing a lock. - pub Locks get(fn locks): map hasher(blake2_256) T::AccountId => Vec>; + pub Locks get(fn locks): map hasher(blake2_128_concat) T::AccountId => Vec>; /// Storage version of the pallet. /// diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 98c7c856bc88a3e74c56c64e77107bc57bacd500..14caf41c1eec67fc455aae0871dd2c439029380e 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -16,6 +16,21 @@ //! Macro for creating the tests for the module. +#![cfg(test)] + +#[derive(Debug)] +pub struct CallWithDispatchInfo; +impl sp_runtime::traits::Dispatchable for CallWithDispatchInfo { + type Origin = (); + type Trait = (); + type Info = frame_support::weights::DispatchInfo; + type PostInfo = frame_support::weights::PostDispatchInfo; + fn dispatch(self, _origin: Self::Origin) + -> sp_runtime::DispatchResultWithInfo { + panic!("Do not use dummy implementation for dispatch."); + } +} + #[macro_export] macro_rules! 
decl_tests { ($test:ty, $ext_builder:ty, $existential_deposit:expr) => { @@ -38,7 +53,7 @@ macro_rules! decl_tests { pub type System = frame_system::Module<$test>; pub type Balances = Module<$test>; - pub const CALL: &<$test as frame_system::Trait>::Call = &(); + pub const CALL: &<$test as frame_system::Trait>::Call = &$crate::tests::CallWithDispatchInfo; /// create a transaction info struct from weight. Handy to avoid building the whole struct. pub fn info_from_weight(w: Weight) -> DispatchInfo { @@ -152,14 +167,14 @@ macro_rules! decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - info_from_weight(1), + &info_from_weight(1), 1, ).is_err()); assert!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - info_from_weight(1), + &info_from_weight(1), 1, ).is_ok()); @@ -170,14 +185,14 @@ macro_rules! decl_tests { ChargeTransactionPayment::from(1), &1, CALL, - info_from_weight(1), + &info_from_weight(1), 1, ).is_err()); assert!( as SignedExtension>::pre_dispatch( ChargeTransactionPayment::from(0), &1, CALL, - info_from_weight(1), + &info_from_weight(1), 1, ).is_err()); }); diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 3a5c2178f88cdc2fb8a7f74bc4a78a705588edcc..59c520f4b55b3b4ef7d9318c2c17a58cacaed3e1 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -16,14 +16,20 @@ //! Test utilities -use sp_runtime::{Perbill, traits::{ConvertInto, IdentityLookup}, testing::Header}; +#![cfg(test)] + +use sp_runtime::{ + Perbill, + traits::{ConvertInto, IdentityLookup}, + testing::Header, +}; use sp_core::H256; use sp_io; use frame_support::{impl_outer_origin, parameter_types}; use frame_support::traits::Get; use frame_support::weights::{Weight, DispatchInfo}; use std::cell::RefCell; -use crate::{GenesisConfig, Module, Trait, decl_tests}; +use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; use frame_system as system; impl_outer_origin!{ @@ -52,7 +58,7 @@ impl frame_system::Trait for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = CallWithDispatchInfo; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index 861c1972127a0751dd4372fdec80cb8588d0e1f1..3a9bfb30cecd8ddd18aaa25c9bad5a4e26a4cd44 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -16,14 +16,20 @@ //! 
Test utilities -use sp_runtime::{Perbill, traits::{ConvertInto, IdentityLookup}, testing::Header}; +#![cfg(test)] + +use sp_runtime::{ + Perbill, + traits::{ConvertInto, IdentityLookup}, + testing::Header, +}; use sp_core::H256; use sp_io; use frame_support::{impl_outer_origin, parameter_types}; use frame_support::traits::{Get, StorageMapShim}; use frame_support::weights::{Weight, DispatchInfo}; use std::cell::RefCell; -use crate::{GenesisConfig, Module, Trait, decl_tests}; +use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; use frame_system as system; impl_outer_origin!{ @@ -52,7 +58,7 @@ impl frame_system::Trait for Test { type Origin = Origin; type Index = u64; type BlockNumber = u64; - type Call = (); + type Call = CallWithDispatchInfo; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = u64; diff --git a/frame/benchmark/Cargo.toml b/frame/benchmark/Cargo.toml index bf237d992cd64cecc2c771a1bec4af4ffc02002d..804a28370449c46056caf5b4e97a515f6e33b9c7 100644 --- a/frame/benchmark/Cargo.toml +++ b/frame/benchmark/Cargo.toml @@ -1,19 +1,22 @@ [package] name = "pallet-benchmark" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Patterns to benchmark in a FRAME runtime." [dependencies] serde = { version = "1.0.101", optional = true } codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.3", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.3", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.3", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.3", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.3", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-alpha.3", default-features = false, path = "../benchmarking" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } [features] default = ["std"] @@ -27,3 +30,7 @@ std = [ "frame-system/std", "frame-benchmarking/std", ] +runtime-benchmarks = ["frame-benchmarking"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/benchmark/src/benchmarking.rs b/frame/benchmark/src/benchmarking.rs index 29f9e8ee972a5bf95dd774eaa0cc6dda316e8de1..1e4740da2c0595b45be30c4a1dc2496176f7d157 100644 --- a/frame/benchmark/src/benchmarking.rs +++ b/frame/benchmark/src/benchmarking.rs @@ -16,6 +16,8 @@ //! Benchmarks for common FRAME Pallet operations. 
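// Descriptive note on the attribute below (added comment, not in the original file):
// the crate-level `#![cfg(feature = "runtime-benchmarks")]` compiles this module's
// contents only when the `runtime-benchmarks` feature — declared in Cargo.toml as
// `runtime-benchmarks = ["frame-benchmarking"]` — is enabled, while the
// `mod benchmarking;` declaration in lib.rs itself stays unconditional.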
+#![cfg(feature = "runtime-benchmarks")] + use super::*; use frame_system::RawOrigin; diff --git a/frame/benchmark/src/lib.rs b/frame/benchmark/src/lib.rs index ef7731eea43d4610a93213843ef04b26d13db1b8..b571ffb5b9cab6272908585270a44e86efa48e6d 100644 --- a/frame/benchmark/src/lib.rs +++ b/frame/benchmark/src/lib.rs @@ -26,7 +26,7 @@ use frame_system::{self as system, ensure_signed}; use codec::{Encode, Decode}; use sp_std::prelude::Vec; -pub mod benchmarking; +mod benchmarking; /// Type alias for currency balance. pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -41,10 +41,10 @@ pub trait Trait: system::Trait { decl_storage! { trait Store for Module as Benchmark { MyMemberList: Vec; - MyMemberMap: map hasher(blake2_256) T::AccountId => bool; + MyMemberMap: map hasher(blake2_128_concat) T::AccountId => bool; MyValue: u32; - MyMap: map hasher(blake2_256) u32 => u32; - MyDoubleMap: double_map hasher(blake2_256) u32, hasher(blake2_256) u32 => u32; + MyMap: map hasher(twox_64_concat) u32 => u32; + MyDoubleMap: double_map hasher(twox_64_concat) u32, hasher(identity) u32 => u32; } } @@ -70,6 +70,7 @@ decl_module! { fn deposit_event() = default; /// Do nothing. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn do_nothing(_origin, input: u32) { if input > 0 { return Ok(()); @@ -81,6 +82,7 @@ decl_module! { /// storage database, however, the `repeat` calls will all pull from the /// storage overlay cache. You must consider this when analyzing the /// results of the benchmark. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn read_value(_origin, repeat: u32) { for _ in 0..repeat { MyValue::get(); @@ -88,6 +90,7 @@ decl_module! { } /// Put a value into a storage value. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn put_value(_origin, repeat: u32) { for r in 0..repeat { MyValue::put(r); @@ -99,6 +102,7 @@ decl_module! { /// storage database, however, the `repeat` calls will all pull from the /// storage overlay cache. You must consider this when analyzing the /// results of the benchmark. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn exists_value(_origin, repeat: u32) { for _ in 0..repeat { MyValue::exists(); @@ -106,6 +110,7 @@ decl_module! { } /// Remove a value from storage `repeat` number of times. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn remove_value(_origin, repeat: u32) { for r in 0..repeat { MyMap::remove(r); @@ -113,6 +118,7 @@ decl_module! { } /// Read a value from storage map `repeat` number of times. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn read_map(_origin, repeat: u32) { for r in 0..repeat { MyMap::get(r); @@ -120,6 +126,7 @@ decl_module! { } /// Insert a value into a map. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn insert_map(_origin, repeat: u32) { for r in 0..repeat { MyMap::insert(r, r); @@ -127,6 +134,7 @@ decl_module! { } /// Check is a map contains a value `repeat` number of times. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn contains_key_map(_origin, repeat: u32) { for r in 0..repeat { MyMap::contains_key(r); @@ -134,25 +142,29 @@ decl_module! { } /// Read a value from storage `repeat` number of times. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn remove_prefix(_origin, repeat: u32) { for r in 0..repeat { MyDoubleMap::remove_prefix(r); } } - // Add user to the list. + /// Add user to the list. 
+ #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn add_member_list(origin) { let who = ensure_signed(origin)?; MyMemberList::::mutate(|x| x.push(who)); } - // Append user to the list. + /// Append user to the list. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn append_member_list(origin) { let who = ensure_signed(origin)?; MyMemberList::::append(&[who])?; } - // Encode a vector of accounts to bytes. + /// Encode a vector of accounts to bytes. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn encode_accounts(_origin, accounts: Vec) { let bytes = accounts.encode(); @@ -163,7 +175,8 @@ decl_module! { } } - // Decode bytes into a vector of accounts. + /// Decode bytes into a vector of accounts. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn decode_accounts(_origin, bytes: Vec) { let accounts: Vec = Decode::decode(&mut bytes.as_slice()).map_err(|_| "Could not decode")?; diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index b39031a6b77f4422b6ea558d0644201cdf8a3b8d..3221c7a1d4d46fd0f2ef4698a3692f3fe76a800e 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,14 +9,16 @@ repository = "https://github.com/paritytech/substrate/" description = "Macro for benchmarking a FRAME runtime." [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api", default-features = false } -sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../primitives/runtime-interface", default-features = false } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime", default-features = false } -sp-std = { version = "2.0.0-alpha.2", path = "../../primitives/std", default-features = false } -sp-io = { path = "../../primitives/io", default-features = false, version = "2.0.0-alpha.2" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +linregress = "0.1" +paste = "0.1" +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +sp-api = { version = "2.0.0-alpha.5", path = "../../primitives/api", default-features = false } +sp-runtime-interface = { version = "2.0.0-alpha.5", path = "../../primitives/runtime-interface", default-features = false } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "2.0.0-alpha.5", path = "../../primitives/std", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false, version = "2.0.0-alpha.5"} +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [features] default = [ "std" ] @@ -29,3 +31,6 @@ std = [ "frame-support/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs new file mode 100644 index 0000000000000000000000000000000000000000..fdf1210832cadc2b06f870ee2373b36d278e2109 --- /dev/null 
+++ b/frame/benchmarking/src/analysis.rs @@ -0,0 +1,243 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Tools for analysing the benchmark results. + +use std::collections::BTreeMap; +use linregress::{FormulaRegressionBuilder, RegressionDataBuilder, RegressionModel}; +use crate::BenchmarkResults; + +pub struct Analysis { + base: u128, + slopes: Vec, + names: Vec, + value_dists: Option, u128, u128)>>, + model: Option, +} + +impl Analysis { + pub fn median_slopes(r: &Vec) -> Option { + let results = r[0].0.iter().enumerate().map(|(i, &(param, _))| { + let mut counted = BTreeMap::, usize>::new(); + for (params, _, _) in r.iter() { + let mut p = params.iter().map(|x| x.1).collect::>(); + p[i] = 0; + *counted.entry(p).or_default() += 1; + } + let others: Vec = counted.iter().max_by_key(|i| i.1).expect("r is not empty; qed").0.clone(); + let values = r.iter() + .filter(|v| + v.0.iter() + .map(|x| x.1) + .zip(others.iter()) + .enumerate() + .all(|(j, (v1, v2))| j == i || v1 == *v2) + ).map(|(ps, v, _)| (ps[i].1, *v)) + .collect::>(); + (format!("{:?}", param), i, others, values) + }).collect::>(); + + let models = results.iter().map(|(_, _, _, ref values)| { + let mut slopes = vec![]; + for (i, &(x1, y1)) in values.iter().enumerate() { + for &(x2, y2) in values.iter().skip(i + 1) { + if x1 != x2 { + slopes.push((y1 as f64 - y2 as f64) / (x1 as f64 - x2 as f64)); + } + } + } + slopes.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); + let slope = slopes[slopes.len() / 2]; + + let mut offsets = vec![]; + for &(x, y) in values.iter() { + offsets.push(y as f64 - slope * x as f64); + } + offsets.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); + let offset = offsets[offsets.len() / 2]; + + (offset, slope) + }).collect::>(); + + let models = models.iter() + .zip(results.iter()) + .map(|((offset, slope), (_, i, others, _))| { + let over = others.iter() + .enumerate() + .filter(|(j, _)| j != i) + .map(|(j, v)| models[j].1 * *v as f64) + .fold(0f64, |acc, i| acc + i); + (*offset - over, *slope) + }) + .collect::>(); + + let base = models[0].0.max(0f64) as u128; + let slopes = models.iter().map(|x| x.1.max(0f64) as u128).collect::>(); + + Some(Self { + base, + slopes, + names: results.into_iter().map(|x| x.0).collect::>(), + value_dists: None, + model: None, + }) + } + + pub fn min_squares_iqr(r: &Vec) -> Option { + let mut results = BTreeMap::, Vec>::new(); + for &(ref params, t, _) in r.iter() { + let p = params.iter().map(|x| x.1).collect::>(); + results.entry(p).or_default().push(t); + } + for (_, rs) in results.iter_mut() { + rs.sort(); + let ql = rs.len() / 4; + *rs = rs[ql..rs.len() - ql].to_vec(); + } + + let mut data = vec![("Y", results.iter().flat_map(|x| x.1.iter().map(|v| *v as f64)).collect())]; + + let names = r[0].0.iter().map(|x| format!("{:?}", x.0)).collect::>(); + data.extend(names.iter() + 
.enumerate() + .map(|(i, p)| ( + p.as_str(), + results.iter() + .flat_map(|x| Some(x.0[i] as f64) + .into_iter() + .cycle() + .take(x.1.len()) + ).collect::>() + )) + ); + + let data = RegressionDataBuilder::new().build_from(data).ok()?; + + let model = FormulaRegressionBuilder::new() + .data(&data) + .formula(format!("Y ~ {}", names.join(" + "))) + .fit() + .ok()?; + + let slopes = model.parameters.regressor_values.iter() + .enumerate() + .map(|(_, x)| (*x + 0.5) as u128) + .collect(); + + let value_dists = results.iter().map(|(p, vs)| { + let total = vs.iter() + .fold(0u128, |acc, v| acc + *v); + let mean = total / vs.len() as u128; + let sum_sq_diff = vs.iter() + .fold(0u128, |acc, v| { + let d = mean.max(*v) - mean.min(*v); + acc + d * d + }); + let stddev = (sum_sq_diff as f64 / vs.len() as f64).sqrt() as u128; + (p.clone(), mean, stddev) + }).collect::>(); + + Some(Self { + base: (model.parameters.intercept_value + 0.5) as u128, + slopes, + names, + value_dists: Some(value_dists), + model: Some(model), + }) + } +} + +fn ms(mut nanos: u128) -> String { + let mut x = 100_000u128; + while x > 1 { + if nanos > x * 1_000 { + nanos = nanos / x * x; + break; + } + x /= 10; + } + format!("{}", nanos as f64 / 1_000f64) +} + +impl std::fmt::Display for Analysis { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + if let Some(ref value_dists) = self.value_dists { + writeln!(f, "\nData points distribution:")?; + writeln!(f, "{} mean µs sigma µs %", self.names.iter().map(|p| format!("{:>5}", p)).collect::>().join(" "))?; + for (param_values, mean, sigma) in value_dists.iter() { + writeln!(f, "{} {:>8} {:>8} {:>3}.{}%", + param_values.iter().map(|v| format!("{:>5}", v)).collect::>().join(" "), + ms(*mean), + ms(*sigma), + (sigma * 100 / mean), + (sigma * 1000 / mean % 10) + )?; + } + } + if let Some(ref model) = self.model { + writeln!(f, "\nQuality and confidence:")?; + writeln!(f, "param error")?; + for (p, se) in self.names.iter().zip(model.se.regressor_values.iter()) { + writeln!(f, "{} {:>8}", p, ms(*se as u128))?; + } + } + + writeln!(f, "\nModel:")?; + writeln!(f, "Time ~= {:>8}", ms(self.base))?; + for (&t, n) in self.slopes.iter().zip(self.names.iter()) { + writeln!(f, " + {} {:>8}", n, ms(t))?; + } + writeln!(f, " µs") + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::BenchmarkParameter; + + #[test] + fn analysis_median_slopes_should_work() { + let a = Analysis::median_slopes(&vec![ + (vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0), + (vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0), + (vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0), + ]).unwrap(); + assert_eq!(a.base, 10_000_000); + assert_eq!(a.slopes, vec![1_000_000, 100_000]); + } + + #[test] + fn analysis_median_min_squares_should_work() { + let a = Analysis::min_squares_iqr(&vec![ + (vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0), + (vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 
13_500_000, 0), + (vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0), + (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0), + ]).unwrap(); + assert_eq!(a.base, 10_000_000); + assert_eq!(a.slopes, vec![1_000_000, 100_000]); + } +} diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index a18048d3053370ca4f89461d83436e00d1a1af74..6bb10f3d972164ab4edb8fd562a97c0e712b575e 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -20,10 +20,16 @@ mod tests; mod utils; +#[cfg(feature = "std")] +mod analysis; + pub use utils::*; +#[cfg(feature = "std")] +pub use analysis::Analysis; #[doc(hidden)] pub use sp_io::storage::root as storage_root; -pub use sp_runtime::traits::Dispatchable; +pub use sp_runtime::traits::{Dispatchable, Zero}; +pub use paste; /// Construct pallet benchmarks for weighing dispatchables. /// @@ -119,6 +125,45 @@ pub use sp_runtime::traits::Dispatchable; /// }: { m.into_iter().collect::() } /// } /// ``` +/// +/// Test functions are automatically generated for each benchmark and are accessible to you when you +/// run `cargo test`. All tests are named `test_benchmark_`, expect you to pass them +/// the Runtime Trait, and run them in a test externalities environment. The test function runs your +/// benchmark just like a regular benchmark, but only testing at the lowest and highest values for +/// each component. The function will return `Ok(())` if the benchmarks return no errors. +/// +/// You can optionally add a `verify` code block at the end of a benchmark to test any final state +/// of your benchmark in a unit test. For example: +/// +/// ```ignore +/// sort_vector { +/// let x in 1 .. 10000; +/// let mut m = Vec::::new(); +/// for i in (0..x).rev() { +/// m.push(i); +/// } +/// }: { +/// m.sort(); +/// } verify { +/// ensure!(m[0] == 0, "You forgot to sort!") +/// } +/// ``` +/// +/// These `verify` blocks will not execute when running your actual benchmarks! +/// +/// You can construct benchmark tests like so: +/// +/// ```ignore +/// #[test] +/// fn test_benchmarks() { +/// new_test_ext().execute_with(|| { +/// assert_ok!(test_benchmark_dummy::()); +/// assert_err!(test_benchmark_other_name::(), "Bad origin"); +/// assert_ok!(test_benchmark_sort_vector::()); +/// assert_err!(test_benchmark_broken_benchmark::(), "You forgot to sort!"); +/// }); +/// } +/// ``` #[macro_export] macro_rules! benchmarks { ( @@ -129,9 +174,31 @@ macro_rules! benchmarks { } $( $rest:tt )* ) => { - $crate::benchmarks_iter!({ - $( { $common , $common_from , $common_to , $common_instancer } )* - } ( ) $( $rest )* ); + $crate::benchmarks_iter!( + NO_INSTANCE + { $( { $common , $common_from , $common_to , $common_instancer } )* } + ( ) + $( $rest )* + ); + } +} + +#[macro_export] +macro_rules! benchmarks_instance { + ( + _ { + $( + let $common:ident in $common_from:tt .. $common_to:expr => $common_instancer:expr; + )* + } + $( $rest:tt )* + ) => { + $crate::benchmarks_iter!( + INSTANCE + { $( { $common , $common_from , $common_to , $common_instancer } )* } + ( ) + $( $rest )* + ); } } @@ -140,92 +207,192 @@ macro_rules! benchmarks { macro_rules! 
benchmarks_iter { // mutation arm: ( + $instance:ident { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) + verify $postcode:block $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $common )* } ( $( $names )* ) $name { $( $code )* }: $name ( $origin $( , $arg )* ) $( $rest )* + $instance + { $( $common )* } + ( $( $names )* ) + $name { $( $code )* }: $name ( $origin $( , $arg )* ) + verify $postcode + $( $rest )* } }; - // mutation arm: + // no instance mutation arm: ( + NO_INSTANCE { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) + verify $postcode:block $( $rest:tt )* ) => { $crate::benchmarks_iter! { - { $( $common )* } ( $( $names )* ) $name { $( $code )* }: { + NO_INSTANCE + { $( $common )* } + ( $( $names )* ) + $name { $( $code )* }: { as $crate::Dispatchable>::dispatch(Call::::$dispatch($($arg),*), $origin.into())?; - } $( $rest )* + } + verify $postcode + $( $rest )* + } + }; + // instance mutation arm: + ( + INSTANCE + { $( $common:tt )* } + ( $( $names:ident )* ) + $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) + verify $postcode:block + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + INSTANCE + { $( $common )* } + ( $( $names )* ) + $name { $( $code )* }: { + as $crate::Dispatchable>::dispatch(Call::::$dispatch($($arg),*), $origin.into())?; + } + verify $postcode + $( $rest )* } }; // iteration arm: ( + $instance:ident { $( $common:tt )* } ( $( $names:ident )* ) $name:ident { $( $code:tt )* }: $eval:block + verify $postcode:block $( $rest:tt )* ) => { $crate::benchmark_backend! { - $name { $( $common )* } { } { $eval } { $( $code )* } + $instance + $name + { $( $common )* } + { } + { $eval } + { $( $code )* } + $postcode } - $crate::benchmarks_iter!( { $( $common )* } ( $( $names )* $name ) $( $rest )* ); + $crate::benchmarks_iter!( + $instance + { $( $common )* } + ( $( $names )* $name ) + $( $rest )* + ); }; // iteration-exit arm - ( { $( $common:tt )* } ( $( $names:ident )* ) ) => { - $crate::selected_benchmark!( $( $names ),* ); - $crate::impl_benchmark!( $( $names ),* ); - } + ( $instance:ident { $( $common:tt )* } ( $( $names:ident )* ) ) => { + $crate::selected_benchmark!( $instance $( $names ),* ); + $crate::impl_benchmark!( $instance $( $names ),* ); + #[cfg(test)] + $crate::impl_benchmark_tests!( $instance $( $names ),* ); + }; + // add verify block to _() format + ( + $instance:ident + { $( $common:tt )* } + ( $( $names:ident )* ) + $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + $instance + { $( $common )* } + ( $( $names )* ) + $name { $( $code )* }: _ ( $origin $( , $arg )* ) + verify { } + $( $rest )* + } + }; + // add verify block to name() format + ( + $instance:ident + { $( $common:tt )* } + ( $( $names:ident )* ) + $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! 
{ + $instance + { $( $common )* } + ( $( $names )* ) + $name { $( $code )* }: $dispatch ( $origin $( , $arg )* ) + verify { } + $( $rest )* + } + }; + // add verify block to {} format + ( + $instance:ident + { $( $common:tt )* } + ( $( $names:ident )* ) + $name:ident { $( $code:tt )* }: $eval:block + $( $rest:tt )* + ) => { + $crate::benchmarks_iter!( + $instance + { $( $common )* } + ( $( $names )* ) + $name { $( $code )* }: $eval + verify { } + $( $rest )* + ); + }; } #[macro_export] #[allow(missing_docs)] macro_rules! benchmark_backend { // parsing arms - ($name:ident { + ($instance:ident $name:ident { $( $common:tt )* } { $( PRE { $( $pre_parsed:tt )* } )* } { $eval:block } { let $pre_id:tt : $pre_ty:ty = $pre_ex:expr; $( $rest:tt )* - } ) => { + } $postcode:block) => { $crate::benchmark_backend! { - $name { $( $common )* } { + $instance $name { $( $common )* } { $( PRE { $( $pre_parsed )* } )* PRE { $pre_id , $pre_ty , $pre_ex } - } { $eval } { $( $rest )* } + } { $eval } { $( $rest )* } $postcode } }; - ($name:ident { + ($instance:ident $name:ident { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { let $param:ident in ( $param_from:expr ) .. $param_to:expr => $param_instancer:expr; $( $rest:tt )* - }) => { + } $postcode:block) => { $crate::benchmark_backend! { - $name { $( $common )* } { + $instance $name { $( $common )* } { $( $parsed )* PARAM { $param , $param_from , $param_to , $param_instancer } - } { $eval } { $( $rest )* } + } { $eval } { $( $rest )* } $postcode } }; // mutation arm to look after defaulting to a common param - ($name:ident { + ($instance:ident $name:ident { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } { $( $parsed:tt )* } { $eval:block } { let $param:ident in ...; $( $rest:tt )* - }) => { + } $postcode:block) => { $crate::benchmark_backend! { - $name { + $instance $name { $( { $common , $common_from , $common_to , $common_instancer } )* } { $( $parsed )* @@ -235,20 +402,20 @@ macro_rules! benchmark_backend { .. ({ $( let $common = $common_to; )* $param }) => ({ $( let $common = || -> Result<(), &'static str> { $common_instancer ; Ok(()) }; )* $param()? }); $( $rest )* - } + } $postcode } }; // mutation arm to look after defaulting only the range to common param - ($name:ident { + ($instance:ident $name:ident { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } { $( $parsed:tt )* } { $eval:block } { let $param:ident in _ .. _ => $param_instancer:expr ; $( $rest:tt )* - }) => { + } $postcode:block) => { $crate::benchmark_backend! { - $name { + $instance $name { $( { $common , $common_from , $common_to , $common_instancer } )* } { $( $parsed )* @@ -258,64 +425,64 @@ macro_rules! benchmark_backend { .. ({ $( let $common = $common_to; )* $param }) => $param_instancer ; $( $rest )* - } + } $postcode } }; // mutation arm to look after a single tt for param_from. - ($name:ident { + ($instance:ident $name:ident { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { let $param:ident in $param_from:tt .. $param_to:expr => $param_instancer:expr ; $( $rest:tt )* - }) => { + } $postcode:block) => { $crate::benchmark_backend! { - $name { $( $common )* } { $( $parsed )* } { $eval } { + $instance $name { $( $common )* } { $( $parsed )* } { $eval } { let $param in ( $param_from ) .. 
$param_to => $param_instancer; $( $rest )* - } + } $postcode } }; // mutation arm to look after the default tail of `=> ()` - ($name:ident { + ($instance:ident $name:ident { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { let $param:ident in $param_from:tt .. $param_to:expr; $( $rest:tt )* - }) => { + } $postcode:block) => { $crate::benchmark_backend! { - $name { $( $common )* } { $( $parsed )* } { $eval } { + $instance $name { $( $common )* } { $( $parsed )* } { $eval } { let $param in $param_from .. $param_to => (); $( $rest )* - } + } $postcode } }; // mutation arm to look after `let _ =` - ($name:ident { + ($instance:ident $name:ident { $( $common:tt )* } { $( $parsed:tt )* } { $eval:block } { let $pre_id:tt = $pre_ex:expr; $( $rest:tt )* - }) => { + } $postcode:block) => { $crate::benchmark_backend! { - $name { $( $common )* } { $( $parsed )* } { $eval } { + $instance $name { $( $common )* } { $( $parsed )* } { $eval } { let $pre_id : _ = $pre_ex; $( $rest )* - } + } $postcode } }; - // actioning arm - ($name:ident { + // no instance actioning arm + (NO_INSTANCE $name:ident { $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* } { $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* - } { $eval:block } { $( $post:tt )* } ) => { + } { $eval:block } { $( $post:tt )* } $postcode:block) => { #[allow(non_camel_case_types)] struct $name; #[allow(unused_variables)] @@ -346,6 +513,83 @@ macro_rules! benchmark_backend { Ok(Box::new(move || -> Result<(), &'static str> { $eval; Ok(()) })) } + + fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) + -> Result Result<(), &'static str>>, &'static str> + { + $( + let $common = $common_from; + )* + $( + // Prepare instance + let $param = components.iter().find(|&c| c.0 == $crate::BenchmarkParameter::$param).unwrap().1; + )* + $( + let $pre_id : $pre_ty = $pre_ex; + )* + $( $param_instancer ; )* + $( $post )* + + Ok(Box::new(move || -> Result<(), &'static str> { $eval; $postcode; Ok(()) })) + } + } + }; + // instance actioning arm + (INSTANCE $name:ident { + $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* + } { + $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* + $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* + } { $eval:block } { $( $post:tt )* } $postcode:block) => { + #[allow(non_camel_case_types)] + struct $name; + #[allow(unused_variables)] + impl, I: Instance> $crate::BenchmarkingSetupInstance for $name { + fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { + vec! 
[ + $( + ($crate::BenchmarkParameter::$param, $param_from, $param_to) + ),* + ] + } + + fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) + -> Result Result<(), &'static str>>, &'static str> + { + $( + let $common = $common_from; + )* + $( + // Prepare instance + let $param = components.iter().find(|&c| c.0 == $crate::BenchmarkParameter::$param).unwrap().1; + )* + $( + let $pre_id : $pre_ty = $pre_ex; + )* + $( $param_instancer ; )* + $( $post )* + + Ok(Box::new(move || -> Result<(), &'static str> { $eval; Ok(()) })) + } + + fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) + -> Result Result<(), &'static str>>, &'static str> + { + $( + let $common = $common_from; + )* + $( + // Prepare instance + let $param = components.iter().find(|&c| c.0 == $crate::BenchmarkParameter::$param).unwrap().1; + )* + $( + let $pre_id : $pre_ty = $pre_ex; + )* + $( $param_instancer ; )* + $( $post )* + + Ok(Box::new(move || -> Result<(), &'static str> { $eval; $postcode; Ok(()) })) + } } } } @@ -367,7 +611,7 @@ macro_rules! benchmark_backend { #[macro_export] macro_rules! selected_benchmark { ( - $( $bench:ident ),* + NO_INSTANCE $( $bench:ident ),* ) => { // The list of available benchmarks for this pallet. #[allow(non_camel_case_types)] @@ -390,25 +634,73 @@ macro_rules! selected_benchmark { $( Self::$bench => <$bench as $crate::BenchmarkingSetup>::instance(&$bench, components), )* } } + + fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) + -> Result Result<(), &'static str>>, &'static str> + { + match self { + $( Self::$bench => <$bench as $crate::BenchmarkingSetup>::verify(&$bench, components), )* + } + } } }; + ( + INSTANCE $( $bench:ident ),* + ) => { + // The list of available benchmarks for this pallet. + #[allow(non_camel_case_types)] + enum SelectedBenchmark { + $( $bench, )* + } + + // Allow us to select a benchmark from the list of available benchmarks. + impl, I: Instance> $crate::BenchmarkingSetupInstance for SelectedBenchmark { + fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { + match self { + $( Self::$bench => <$bench as $crate::BenchmarkingSetupInstance>::components(&$bench), )* + } + } + + fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) + -> Result Result<(), &'static str>>, &'static str> + { + match self { + $( Self::$bench => <$bench as $crate::BenchmarkingSetupInstance>::instance(&$bench, components), )* + } + } + + fn verify(&self, components: &[($crate::BenchmarkParameter, u32)]) + -> Result Result<(), &'static str>>, &'static str> + { + match self { + $( Self::$bench => <$bench as $crate::BenchmarkingSetupInstance>::verify(&$bench, components), )* + } + } + } + } } #[macro_export] macro_rules! impl_benchmark { ( - $( $name:ident ),* + NO_INSTANCE $( $name:ident ),* ) => { - impl $crate::Benchmarking<$crate::BenchmarkResults> for Module { + impl $crate::Benchmarking<$crate::BenchmarkResults> for Module + where T: frame_system::Trait + { + fn benchmarks() -> Vec<&'static [u8]> { + vec![ $( stringify!($name).as_ref() ),* ] + } + fn run_benchmark( - extrinsic: Vec, - lowest_range_values: Vec, - highest_range_values: Vec, - steps: Vec, + extrinsic: &[u8], + lowest_range_values: &[u32], + highest_range_values: &[u32], + steps: &[u32], repeat: u32, ) -> Result, &'static str> { // Map the input to the selected benchmark. 
- let extrinsic = sp_std::str::from_utf8(extrinsic.as_slice()) + let extrinsic = sp_std::str::from_utf8(extrinsic) .map_err(|_| "`extrinsic` is not a valid utf8 string!")?; let selected_benchmark = match extrinsic { $( stringify!($name) => SelectedBenchmark::$name, )* @@ -461,15 +753,127 @@ macro_rules! impl_benchmark { // Set up the externalities environment for the setup we want to benchmark. let closure_to_benchmark = >::instance(&selected_benchmark, &c)?; + // Set the block number to at least 1 so events are deposited. + if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { + frame_system::Module::::set_block_number(1.into()); + } + + // Commit the externalities to the database, flushing the DB cache. + // This will enable worst case scenario for reading from the database. + $crate::benchmarking::commit_db(); + + // Time the extrinsic logic. + frame_support::debug::trace!(target: "benchmark", "Start Benchmark: {:?} {:?}", name, component_value); + let start_extrinsic = $crate::benchmarking::current_time(); + closure_to_benchmark()?; + let finish_extrinsic = $crate::benchmarking::current_time(); + let elapsed_extrinsic = finish_extrinsic - start_extrinsic; + frame_support::debug::trace!(target: "benchmark", "End Benchmark: {} ns", elapsed_extrinsic); + + // Time the storage root recalculation. + let start_storage_root = $crate::benchmarking::current_time(); + $crate::storage_root(); + let finish_storage_root = $crate::benchmarking::current_time(); + let elapsed_storage_root = finish_storage_root - start_storage_root; + + results.push((c.clone(), elapsed_extrinsic, elapsed_storage_root)); + + // Wipe the DB back to the genesis state. + $crate::benchmarking::wipe_db(); + } + } + } + return Ok(results); + } + } + }; + ( + INSTANCE $( $name:ident ),* + ) => { + impl, I: Instance> $crate::Benchmarking<$crate::BenchmarkResults> for Module + where T: frame_system::Trait + { + fn benchmarks() -> Vec<&'static [u8]> { + vec![ $( stringify!($name).as_ref() ),* ] + } + + fn run_benchmark( + extrinsic: &[u8], + lowest_range_values: &[u32], + highest_range_values: &[u32], + steps: &[u32], + repeat: u32, + ) -> Result, &'static str> { + // Map the input to the selected benchmark. + let extrinsic = sp_std::str::from_utf8(extrinsic) + .map_err(|_| "`extrinsic` is not a valid utf8 string!")?; + let selected_benchmark = match extrinsic { + $( stringify!($name) => SelectedBenchmark::$name, )* + _ => return Err("Could not find extrinsic."), + }; + + // Warm up the DB + $crate::benchmarking::commit_db(); + $crate::benchmarking::wipe_db(); + + let components = >::components(&selected_benchmark); + let mut results: Vec<$crate::BenchmarkResults> = Vec::new(); + + // Default number of steps for a component. + let mut prev_steps = 10; + + // Select the component we will be benchmarking. Each component will be benchmarked. + for (idx, (name, low, high)) in components.iter().enumerate() { + // Get the number of steps for this component. + let steps = steps.get(idx).cloned().unwrap_or(prev_steps); + prev_steps = steps; + + let lowest = lowest_range_values.get(idx).cloned().unwrap_or(*low); + let highest = highest_range_values.get(idx).cloned().unwrap_or(*high); + + let diff = highest - lowest; + + // Create up to `STEPS` steps for that component between high and low. 
+ let step_size = (diff / steps).max(1); + let num_of_steps = diff / step_size + 1; + + for s in 0..num_of_steps { + // This is the value we will be testing for component `name` + let component_value = lowest + step_size * s; + + // Select the max value for all the other components. + let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() + .enumerate() + .map(|(idx, (n, _, h))| + if n == name { + (*n, component_value) + } else { + (*n, *highest_range_values.get(idx).unwrap_or(h)) + } + ) + .collect(); + + // Run the benchmark `repeat` times. + for _ in 0..repeat { + // Set up the externalities environment for the setup we want to benchmark. + let closure_to_benchmark = >::instance(&selected_benchmark, &c)?; + + // Set the block number to at least 1 so events are deposited. + if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { + frame_system::Module::::set_block_number(1.into()); + } + // Commit the externalities to the database, flushing the DB cache. // This will enable worst case scenario for reading from the database. $crate::benchmarking::commit_db(); // Time the extrinsic logic. + frame_support::debug::trace!(target: "benchmark", "Start Benchmark: {:?} {:?}", name, component_value); let start_extrinsic = $crate::benchmarking::current_time(); closure_to_benchmark()?; let finish_extrinsic = $crate::benchmarking::current_time(); let elapsed_extrinsic = finish_extrinsic - start_extrinsic; + frame_support::debug::trace!(target: "benchmark", "End Benchmark: {} ns", elapsed_extrinsic); // Time the storage root recalculation. let start_storage_root = $crate::benchmarking::current_time(); @@ -489,3 +893,166 @@ macro_rules! impl_benchmark { } } } + +// This creates unit tests from the main benchmark macro. +// They run the benchmark using the `high` and `low` value for each component +// and ensure that everything completes successfully. +#[macro_export] +macro_rules! impl_benchmark_tests { + ( + NO_INSTANCE + $( $name:ident ),* + ) => { + $( + $crate::paste::item! { + fn [] () -> Result<(), &'static str> + where T: frame_system::Trait + { + let selected_benchmark = SelectedBenchmark::$name; + let components = >::components(&selected_benchmark); + + for (_, (name, low, high)) in components.iter().enumerate() { + // Test only the low and high value, assuming values in the middle won't break + for component_value in vec![low, high] { + // Select the max value for all the other components. + let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() + .enumerate() + .map(|(_, (n, _, h))| + if n == name { + (*n, *component_value) + } else { + (*n, *h) + } + ) + .collect(); + + // Set up the verification state + let closure_to_verify = >::verify(&selected_benchmark, &c)?; + + // Set the block number to at least 1 so events are deposited. + if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { + frame_system::Module::::set_block_number(1.into()); + } + + // Run verification + closure_to_verify()?; + + // Reset the state + $crate::benchmarking::wipe_db(); + } + } + Ok(()) + } + } + )* + }; + ( + INSTANCE + $( $name:ident ),* + ) => { + $( + $crate::paste::item! 
{ + fn [] () -> Result<(), &'static str> + where T: frame_system::Trait + { + let selected_benchmark = SelectedBenchmark::$name; + let components = >::components(&selected_benchmark); + + for (_, (name, low, high)) in components.iter().enumerate() { + // Test only the low and high value, assuming values in the middle won't break + for component_value in vec![low, high] { + // Select the max value for all the other components. + let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() + .enumerate() + .map(|(_, (n, _, h))| + if n == name { + (*n, *component_value) + } else { + (*n, *h) + } + ) + .collect(); + + // Set up the verification state + let closure_to_verify = >::verify(&selected_benchmark, &c)?; + + // Set the block number to at least 1 so events are deposited. + if $crate::Zero::is_zero(&frame_system::Module::::block_number()) { + frame_system::Module::::set_block_number(1.into()); + } + + // Run verification + closure_to_verify()?; + + // Reset the state + $crate::benchmarking::wipe_db(); + } + } + Ok(()) + } + } + )* + }; +} + + +/// This macro adds pallet benchmarks to a `Vec` object. +/// +/// First create an object that holds in the input parameters for the benchmark: +/// +/// ```ignore +/// let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat); +/// ``` +/// +/// Then define a mutable local variable to hold your `BenchmarkBatch` object: +/// +/// ```ignore +/// let mut batches = Vec::::new(); +/// ```` +/// +/// Then add the pallets you want to benchmark to this object, including the string +/// you want to use target a particular pallet: +/// +/// ```ignore +/// add_benchmark!(params, batches, b"balances", Balances); +/// add_benchmark!(params, batches, b"identity", Identity); +/// add_benchmark!(params, batches, b"session", SessionBench::); +/// ... +/// ``` +/// +/// At the end of `dispatch_benchmark`, you should return this batches object. +#[macro_export] +macro_rules! add_benchmark { + ( $params:ident, $batches:ident, $name:literal, $( $location:tt )* ) => ( + let (pallet, benchmark, lowest_range_values, highest_range_values, steps, repeat) = $params; + if &pallet[..] == &$name[..] || &pallet[..] == &b"*"[..] { + if &pallet[..] == &b"*"[..] || &benchmark[..] == &b"*"[..] 
{ + for benchmark in $( $location )*::benchmarks().into_iter() { + $batches.push($crate::BenchmarkBatch { + results: $( $location )*::run_benchmark( + benchmark, + &lowest_range_values[..], + &highest_range_values[..], + &steps[..], + repeat, + )?, + pallet: pallet.to_vec(), + benchmark: benchmark.to_vec(), + }); + } + } else { + $batches.push($crate::BenchmarkBatch { + results: $( $location )*::run_benchmark( + &benchmark[..], + &lowest_range_values[..], + &highest_range_values[..], + &steps[..], + repeat, + )?, + pallet: pallet.to_vec(), + benchmark: benchmark.clone(), + }); + } + } + ) +} diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 4327476c4a6c7c59dae5019935ab06879d61e353..50a39d0fcf1bedb997c5989320ec3981de7cf47c 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -22,17 +22,29 @@ use super::*; use codec::Decode; use sp_std::prelude::*; use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::{H256, Header}}; -use frame_support::{dispatch::DispatchResult, decl_module, impl_outer_origin}; +use frame_support::{ + dispatch::DispatchResult, decl_module, decl_storage, impl_outer_origin, + assert_ok, assert_err, ensure +}; use frame_system::{RawOrigin, ensure_signed, ensure_none}; +decl_storage! { + trait Store for Module as Test { + Value get(fn value): Option; + } +} + decl_module! { pub struct Module for enum Call where origin: T::Origin { - fn dummy(origin, _n: u32) -> DispatchResult { + #[weight = frame_support::weights::SimpleDispatchInfo::default()] + fn set_value(origin, n: u32) -> DispatchResult { let _sender = ensure_signed(origin)?; + Value::put(n); Ok(()) } - fn other_dummy(origin, _n: u32) -> DispatchResult { + #[weight = frame_support::weights::SimpleDispatchInfo::default()] + fn dummy(origin, _n: u32) -> DispatchResult { let _sender = ensure_none(origin)?; Ok(()) } @@ -94,31 +106,51 @@ benchmarks!{ let b in 1 .. 1000 => (); } - dummy { + set_value { let b in ...; - let caller = account("caller", 0, 0); + let caller = account::("caller", 0, 0); }: _ (RawOrigin::Signed(caller), b.into()) + verify { + assert_eq!(Value::get(), Some(b)); + } other_name { let b in ...; - let caller = account("caller", 0, 0); - }: other_dummy (RawOrigin::Signed(caller), b.into()) + }: dummy (RawOrigin::None, b.into()) sort_vector { - let x in 0 .. 10000; + let x in 1 .. 10000; let mut m = Vec::::new(); - for i in 0..x { + for i in (0..x).rev() { m.push(i); } }: { m.sort(); + } verify { + ensure!(m[0] == 0, "You forgot to sort!") + } + + bad_origin { + let b in ...; + let caller = account::("caller", 0, 0); + }: dummy (RawOrigin::Signed(caller), b.into()) + + bad_verify { + let x in 1 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { } + verify { + ensure!(m[0] == 0, "You forgot to sort!") } } #[test] fn benchmarks_macro_works() { - // Check benchmark creation for `dummy`. - let selected_benchmark = SelectedBenchmark::dummy; + // Check benchmark creation for `set_value`. 
+ let selected_benchmark = SelectedBenchmark::set_value; let components = >::components(&selected_benchmark); assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); @@ -146,7 +178,7 @@ fn benchmarks_macro_rename_works() { ).expect("failed to create closure"); new_test_ext().execute_with(|| { - assert_eq!(closure(), Err("Bad origin")); + assert_ok!(closure()); }); } @@ -155,7 +187,7 @@ fn benchmarks_macro_works_for_non_dispatchable() { let selected_benchmark = SelectedBenchmark::sort_vector; let components = >::components(&selected_benchmark); - assert_eq!(components, vec![(BenchmarkParameter::x, 0, 10000)]); + assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]); let closure = >::instance( &selected_benchmark, @@ -164,3 +196,29 @@ fn benchmarks_macro_works_for_non_dispatchable() { assert_eq!(closure(), Ok(())); } + +#[test] +fn benchmarks_macro_verify_works() { + // Check postcondition for benchmark `set_value` is valid. + let selected_benchmark = SelectedBenchmark::set_value; + + let closure = >::verify( + &selected_benchmark, + &[(BenchmarkParameter::b, 1)], + ).expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_ok!(closure()); + }); +} + +#[test] +fn benchmarks_generate_unit_tests() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set_value::()); + assert_ok!(test_benchmark_other_name::()); + assert_ok!(test_benchmark_sort_vector::()); + assert_err!(test_benchmark_bad_origin::(), "Bad origin"); + assert_err!(test_benchmark_bad_verify::(), "You forgot to sort!"); + }); +} diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index bc6cfbcc86ea1e221cdceb7728fd89b2e7bb2bd9..41b968fbfcad6be7be50a5d737399a9fcd41d0c1 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -22,13 +22,24 @@ use sp_io::hashing::blake2_256; use sp_runtime::RuntimeString; /// An alphabet of possible parameters to use for benchmarking. -#[derive(codec::Encode, codec::Decode, Clone, Copy, PartialEq, Debug)] +#[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)] #[allow(missing_docs)] #[allow(non_camel_case_types)] pub enum BenchmarkParameter { a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, } +/// The results of a single of benchmark. +#[derive(Encode, Decode, Clone, PartialEq, Debug)] +pub struct BenchmarkBatch { + /// The pallet containing this benchmark. + pub pallet: Vec, + /// The extrinsic (or benchmark name) of this benchmark. + pub benchmark: Vec, + /// The results from this benchmark. + pub results: Vec, +} + /// Results from running benchmarks on a FRAME pallet. /// Contains duration of the function call in nanoseconds along with the benchmark parameters /// used for that benchmark result. @@ -39,13 +50,13 @@ sp_api::decl_runtime_apis! { pub trait Benchmark { /// Dispatch the given benchmark. fn dispatch_benchmark( - module: Vec, - extrinsic: Vec, + pallet: Vec, + benchmark: Vec, lowest_range_values: Vec, highest_range_values: Vec, steps: Vec, repeat: u32, - ) -> Result, RuntimeString>; + ) -> Result, RuntimeString>; } } @@ -75,19 +86,24 @@ pub trait Benchmarking { /// The pallet benchmarking trait. pub trait Benchmarking { + /// Get the benchmarks available for this pallet. Generally there is one benchmark per + /// extrinsic, so these are sometimes just called "extrinsics". + fn benchmarks() -> Vec<&'static [u8]>; + /// Run the benchmarks for this pallet. 
/// /// Parameters - /// - `extrinsic`: The name of extrinsic function you want to benchmark encoded as bytes. + /// - `name`: The name of extrinsic function or benchmark you want to benchmark encoded as + /// bytes. /// - `steps`: The number of sample points you want to take across the range of parameters. /// - `lowest_range_values`: The lowest number for each range of parameters. /// - `highest_range_values`: The highest number for each range of parameters. /// - `repeat`: The number of times you want to repeat a benchmark. fn run_benchmark( - extrinsic: Vec, - lowest_range_values: Vec, - highest_range_values: Vec, - steps: Vec, + name: &[u8], + lowest_range_values: &[u32], + highest_range_values: &[u32], + steps: &[u32], repeat: u32, ) -> Result, &'static str>; } @@ -97,8 +113,23 @@ pub trait BenchmarkingSetup { /// Return the components and their ranges which should be tested in this benchmark. fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; - /// Set up the storage, and prepare a closure to test in a single run of the benchmark. + /// Set up the storage, and prepare a closure to run the benchmark. + fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; + + /// Set up the storage, and prepare a closure to test and verify the benchmark + fn verify(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; +} + +/// The required setup for creating a benchmark. +pub trait BenchmarkingSetupInstance { + /// Return the components and their ranges which should be tested in this benchmark. + fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; + + /// Set up the storage, and prepare a closure to run the benchmark. fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; + + /// Set up the storage, and prepare a closure to test and verify the benchmark + fn verify(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; } /// Grab an account, seeded by a name and index. 
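The `instance` / `verify` split added above is what the generated `test_benchmark_*` functions rely on: the timed benchmark path runs only the setup plus the benchmarked closure, while the test path reruns the same setup via `verify` and additionally checks the benchmark's post-condition. A self-contained toy sketch of that shape (simplified names and signatures, not the actual FRAME traits, with `char` standing in for `BenchmarkParameter`):

```rust
// Toy model of the `components` / `instance` / `verify` triple -- simplified,
// assumed signatures, not the real `BenchmarkingSetup` trait.
type Closure = Box<dyn FnOnce() -> Result<(), &'static str>>;

trait BenchSetup {
	/// The tunable components of this benchmark: (name, low, high).
	fn components(&self) -> Vec<(char, u32, u32)>;
	/// Set up state and return the closure whose execution time is measured.
	fn instance(&self, components: &[(char, u32)]) -> Result<Closure, &'static str>;
	/// Same setup, but the returned closure also checks the post-condition.
	fn verify(&self, components: &[(char, u32)]) -> Result<Closure, &'static str>;
}

/// Mirrors the `sort_vector` benchmark from `frame/benchmarking/src/tests.rs` above.
struct SortVector;

impl BenchSetup for SortVector {
	fn components(&self) -> Vec<(char, u32, u32)> {
		vec![('x', 1, 10_000)]
	}

	fn instance(&self, components: &[(char, u32)]) -> Result<Closure, &'static str> {
		let x = components.iter().find(|c| c.0 == 'x').ok_or("missing component")?.1;
		let mut m: Vec<u32> = (0..x).rev().collect();
		Ok(Box::new(move || -> Result<(), &'static str> { m.sort(); Ok(()) }))
	}

	fn verify(&self, components: &[(char, u32)]) -> Result<Closure, &'static str> {
		let x = components.iter().find(|c| c.0 == 'x').ok_or("missing component")?.1;
		let mut m: Vec<u32> = (0..x).rev().collect();
		Ok(Box::new(move || -> Result<(), &'static str> {
			m.sort();
			if m[0] == 0 { Ok(()) } else { Err("You forgot to sort!") }
		}))
	}
}

fn main() -> Result<(), &'static str> {
	let bench = SortVector;
	// The generated tests exercise the low and high end of each component.
	let (name, low, _high) = bench.components()[0];
	let c = [(name, low)];
	// Benchmark path: only the closure returned by `instance` is timed.
	let timed = bench.instance(&c)?;
	timed()?;
	// Unit-test path: `verify` reruns the setup and asserts the post-condition.
	let checked = bench.verify(&c)?;
	checked()?;
	Ok(())
}
```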
diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 60899feb4fbb96aa7eb44d637c5cae43bd52612a..113705c2c8b90c95f068971bb0f810c7b401debc 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-collective" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,17 +10,18 @@ description = "Collective system: Members of a set of account IDs can make their [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } [features] default = ["std"] @@ -34,3 +35,11 @@ std = [ "sp-runtime/std", "frame-system/std", ] +runtime-benchmarks = [ + "frame-benchmarking", + "sp-runtime/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..edef5e2e24e050f228edaf1cdcbefccf6a7d623c --- /dev/null +++ b/frame/collective/src/benchmarking.rs @@ -0,0 +1,210 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
Staking pallet benchmarking. + +use super::*; + +use frame_system::RawOrigin as SystemOrigin; +use frame_benchmarking::{benchmarks_instance, account}; + +use frame_system::Module as System; +use crate::Module as Collective; + +const SEED: u32 = 0; + +benchmarks_instance! { + _{ + // User account seed. + let u in 1 .. 1000 => (); + // Old members. + let n in 1 .. 1000 => (); + // New members. + let m in 1 .. 1000 => (); + // Existing proposals. + let p in 1 .. 100 => (); + } + + set_members { + let m in ...; + let n in ...; + + // Construct `new_members`. + // It should influence timing since it will sort this vector. + let mut new_members = vec![]; + for i in 0 .. m { + let member = account("member", i, SEED); + new_members.push(member); + } + + // Set old members. + // We compute the difference of old and new members, so it should influence timing. + let mut old_members = vec![]; + for i in 0 .. n { + let old_member = account("old member", i, SEED); + old_members.push(old_member); + } + + let prime = Some(account("prime", 0, SEED)); + + Collective::::set_members(SystemOrigin::Root.into(), old_members, prime.clone())?; + + }: _(SystemOrigin::Root, new_members.clone(), prime) + verify { + new_members.sort(); + assert_eq!(Collective::::members(), new_members); + } + + execute { + let u in ...; + + let caller: T::AccountId = account("caller", u, SEED); + let proposal: T::Proposal = Call::::close(Default::default(), Default::default()).into(); + + Collective::::set_members(SystemOrigin::Root.into(), vec![caller.clone()], None)?; + + }: _(SystemOrigin::Signed(caller), Box::new(proposal)) + + propose { + let u in ...; + + let caller: T::AccountId = account("caller", u, SEED); + let proposal: T::Proposal = Call::::close(Default::default(), Default::default()).into(); + + Collective::::set_members(SystemOrigin::Root.into(), vec![caller.clone()], None)?; + + let member_count = 0; + + }: _(SystemOrigin::Signed(caller), member_count, Box::new(proposal.into())) + + propose_else_branch { + let u in ...; + let p in ...; + + let caller: T::AccountId = account("caller", u, SEED); + let proposal: T::Proposal = Call::::close(Default::default(), Default::default()).into(); + + Collective::::set_members(SystemOrigin::Root.into(), vec![caller.clone()], None)?; + + let member_count = 3; + + // Add previous proposals. + for i in 0 .. 
p { + let proposal: T::Proposal = Call::::close(Default::default(), (i + 1).into()).into(); + Collective::::propose(SystemOrigin::Signed(caller.clone()).into(), member_count.clone(), Box::new(proposal.into()))?; + } + + }: propose(SystemOrigin::Signed(caller), member_count, Box::new(proposal.into())) + + vote { + let u in ...; + + let caller1: T::AccountId = account("caller1", u, SEED); + let caller2: T::AccountId = account("caller2", u, SEED); + + let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); + let proposal_hash = T::Hashing::hash_of(&proposal); + + Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; + + let member_count = 3; + Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; + + let index = 0; + let approve = true; + + }: _(SystemOrigin::Signed(caller2), proposal_hash, index, approve) + + vote_not_approve { + let u in ...; + + let caller1: T::AccountId = account("caller1", u, SEED); + let caller2: T::AccountId = account("caller2", u, SEED); + + let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); + let proposal_hash = T::Hashing::hash_of(&proposal); + + Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; + + let member_count = 3; + Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; + + let index = 0; + let approve = false; + + }: vote(SystemOrigin::Signed(caller2), proposal_hash, index, approve) + + vote_approved { + let u in ...; + + let caller1: T::AccountId = account("caller1", u, SEED); + let caller2: T::AccountId = account("caller2", u, SEED); + + let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); + let proposal_hash = T::Hashing::hash_of(&proposal); + + Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; + + let member_count = 2; + Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; + + let index = 0; + let approve = true; + + }: vote(SystemOrigin::Signed(caller2), proposal_hash, index, approve) + + close { + let u in ...; + + let caller1: T::AccountId = account("caller1", u, SEED); + let caller2: T::AccountId = account("caller2", u, SEED); + + let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); + let proposal_hash = T::Hashing::hash_of(&proposal); + + Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; + let member_count = 2; + Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; + + let index = 0; + let approve = true; + + let vote_end = T::MotionDuration::get() + 1u32.into(); + System::::set_block_number(vote_end); + + }: _(SystemOrigin::Signed(caller2), proposal_hash, index) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set_members::()); + assert_ok!(test_benchmark_execute::()); + assert_ok!(test_benchmark_propose::()); + assert_ok!(test_benchmark_propose_else_branch::()); + assert_ok!(test_benchmark_vote::()); + assert_ok!(test_benchmark_vote_not_approve::()); + assert_ok!(test_benchmark_vote_approved::()); + assert_ok!(test_benchmark_close::()); + }); + } +} diff --git 
a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index b5620e34065f64415e93de725af1aa6cd372352c..53e9853221f21e026520fd557ee867fdedc50e59 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -39,15 +39,18 @@ use sp_std::{prelude::*, result}; use sp_core::u32_trait::Value as U32; use sp_runtime::RuntimeDebug; -use sp_runtime::traits::{Hash, EnsureOrigin}; +use sp_runtime::traits::Hash; use frame_support::weights::SimpleDispatchInfo; use frame_support::{ dispatch::{Dispatchable, Parameter}, codec::{Encode, Decode}, - traits::{Get, ChangeMembers, InitializeMembers}, decl_module, decl_event, + traits::{Get, ChangeMembers, InitializeMembers, EnsureOrigin}, decl_module, decl_event, decl_storage, decl_error, ensure, }; use frame_system::{self as system, ensure_signed, ensure_root}; +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking; + /// Simple index type for proposal counting. pub type ProposalIndex = u32; @@ -57,12 +60,12 @@ pub type ProposalIndex = u32; /// vote exactly once, therefore also the number of votes for any given motion. pub type MemberCount = u32; -pub trait Trait: frame_system::Trait { +pub trait Trait: frame_system::Trait { /// The outer origin type. type Origin: From>; /// The outer call dispatch type. - type Proposal: Parameter + Dispatchable>::Origin>; + type Proposal: Parameter + Dispatchable>::Origin> + From>; /// The outer event type. type Event: From> + Into<::Event>; @@ -106,10 +109,10 @@ decl_storage! { pub Proposals get(fn proposals): Vec; /// Actual proposal for a given hash, if it's current. pub ProposalOf get(fn proposal_of): - map hasher(blake2_256) T::Hash => Option<>::Proposal>; + map hasher(identity) T::Hash => Option<>::Proposal>; /// Votes on a given proposal, if it is ongoing. pub Voting get(fn voting): - map hasher(blake2_256) T::Hash => Option>; + map hasher(identity) T::Hash => Option>; /// Proposals so far. pub ProposalCount get(fn proposal_count): u32; /// The current members of the collective. This is stored sorted (just by value). 
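On the storage change above, `ProposalOf` and `Voting` move from `hasher(blake2_256)` to `hasher(identity)` because their key is a `T::Hash`, already the output of a cryptographic hash, so re-hashing it costs cycles without adding any protection. A toy illustration of the idea; the prefix and helper below are invented for this sketch and are not frame_support's actual key derivation:

```rust
// Hypothetical sketch (made-up prefix and helper): with an `identity` "hasher"
// the final storage key is simply prefix ++ SCALE-encoded key, so anyone holding
// the proposal hash can compute the exact storage location, which helps RPC
// lookups and state proofs. This is only safe because the key is itself the
// output of a cryptographic hash and therefore uniformly distributed;
// attacker-chosen keys (e.g. account IDs) still need a real hasher such as
// `twox_64_concat` or `blake2_128_concat`.
fn identity_map_key(prefix: &[u8], encoded_key: &[u8]) -> Vec<u8> {
	let mut key = prefix.to_vec();
	key.extend_from_slice(encoded_key);
	key
}

fn main() {
	// Pretend this came from `T::Hashing::hash_of(&proposal)` elsewhere.
	let proposal_hash = [0xabu8; 32];
	let storage_key = identity_map_key(b"Collective/ProposalOf/", &proposal_hash);
	let hex: String = storage_key.iter().map(|b| format!("{:02x}", b)).collect();
	println!("0x{}", hex);
}
```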
@@ -431,7 +434,7 @@ where pub struct EnsureMember(sp_std::marker::PhantomData<(AccountId, I)>); impl< O: Into, O>> + From>, - AccountId, + AccountId: Default, I, > EnsureOrigin for EnsureMember { type Success = AccountId; @@ -441,6 +444,11 @@ impl< r => Err(O::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Member(Default::default())) + } } pub struct EnsureMembers(sp_std::marker::PhantomData<(N, AccountId, I)>); @@ -457,6 +465,11 @@ impl< r => Err(O::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Members(N::VALUE, N::VALUE)) + } } pub struct EnsureProportionMoreThan( @@ -476,6 +489,11 @@ impl< r => Err(O::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Members(1u32, 0u32)) + } } pub struct EnsureProportionAtLeast( @@ -495,6 +513,11 @@ impl< r => Err(O::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Members(0u32, 0u32)) + } } #[cfg(test)] @@ -566,20 +589,21 @@ mod tests { } ); - fn make_ext() -> sp_io::TestExternalities { - GenesisConfig { + pub fn new_test_ext() -> sp_io::TestExternalities { + let mut ext: sp_io::TestExternalities = GenesisConfig { collective_Instance1: Some(collective::GenesisConfig { members: vec![1, 2, 3], phantom: Default::default(), }), collective: None, - }.build_storage().unwrap().into() + }.build_storage().unwrap().into(); + ext.execute_with(|| System::set_block_number(1)); + ext } #[test] fn motions_basic_environment_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { assert_eq!(Collective::members(), vec![1, 2, 3]); assert_eq!(Collective::proposals(), Vec::::new()); }); @@ -591,8 +615,7 @@ mod tests { #[test] fn close_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash = BlakeTwo256::hash_of(&proposal); @@ -608,7 +631,7 @@ mod tests { System::set_block_number(4); assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); - let record = |event| EventRecord { phase: Phase::Finalization, event, topics: vec![] }; + let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!(System::events(), vec![ record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), @@ -620,8 +643,7 @@ mod tests { #[test] fn close_with_prime_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members(Origin::ROOT, vec![1, 2, 3], Some(3))); @@ -632,7 +654,7 @@ mod tests { System::set_block_number(4); assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); - let record = |event| EventRecord { phase: Phase::Finalization, event, topics: vec![] }; + let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!(System::events(), vec![ record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), @@ -644,8 +666,7 @@ mod tests { #[test] fn close_with_voting_prime_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + 
new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash = BlakeTwo256::hash_of(&proposal); assert_ok!(Collective::set_members(Origin::ROOT, vec![1, 2, 3], Some(1))); @@ -656,7 +677,7 @@ mod tests { System::set_block_number(4); assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); - let record = |event| EventRecord { phase: Phase::Finalization, event, topics: vec![] }; + let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; assert_eq!(System::events(), vec![ record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), @@ -669,8 +690,7 @@ mod tests { #[test] fn removal_of_old_voters_votes_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash = BlakeTwo256::hash_of(&proposal); let end = 4; @@ -704,8 +724,7 @@ mod tests { #[test] fn removal_of_old_voters_votes_works_with_set_members() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash = BlakeTwo256::hash_of(&proposal); let end = 4; @@ -739,8 +758,7 @@ mod tests { #[test] fn propose_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash = proposal.blake2_256().into(); let end = 4; @@ -754,7 +772,7 @@ mod tests { assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1(RawEvent::Proposed( 1, 0, @@ -769,8 +787,7 @@ mod tests { #[test] fn motions_ignoring_non_collective_proposals_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); assert_noop!( Collective::propose(Origin::signed(42), 3, Box::new(proposal.clone())), @@ -781,8 +798,7 @@ mod tests { #[test] fn motions_ignoring_non_collective_votes_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); @@ -795,7 +811,7 @@ mod tests { #[test] fn motions_ignoring_bad_index_collective_vote_works() { - make_ext().execute_with(|| { + new_test_ext().execute_with(|| { System::set_block_number(3); let proposal = make_proposal(42); let hash: H256 = proposal.blake2_256().into(); @@ -809,8 +825,7 @@ mod tests { #[test] fn motions_revoting_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash: H256 = proposal.blake2_256().into(); let end = 4; @@ -835,7 +850,7 @@ mod tests { assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1(RawEvent::Proposed( 1, 0, @@ -845,7 +860,7 @@ mod tests { topics: vec![], }, EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1(RawEvent::Voted( 1, hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), @@ -861,8 +876,7 @@ mod tests { #[test] fn motions_reproposing_disapproved_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let 
proposal = make_proposal(42); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); @@ -875,8 +889,7 @@ mod tests { #[test] fn motions_disapproval_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); @@ -884,7 +897,7 @@ mod tests { assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1( RawEvent::Proposed( 1, @@ -895,7 +908,7 @@ mod tests { topics: vec![], }, EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1(RawEvent::Voted( 2, hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), @@ -906,7 +919,7 @@ mod tests { topics: vec![], }, EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1(RawEvent::Disapproved( hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), )), @@ -918,8 +931,7 @@ mod tests { #[test] fn motions_approval_works() { - make_ext().execute_with(|| { - System::set_block_number(1); + new_test_ext().execute_with(|| { let proposal = make_proposal(42); let hash: H256 = proposal.blake2_256().into(); assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()))); @@ -927,7 +939,7 @@ mod tests { assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1(RawEvent::Proposed( 1, 0, @@ -937,7 +949,7 @@ mod tests { topics: vec![], }, EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1(RawEvent::Voted( 2, hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), @@ -948,14 +960,14 @@ mod tests { topics: vec![], }, EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1(RawEvent::Approved( hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), )), topics: vec![], }, EventRecord { - phase: Phase::Finalization, + phase: Phase::Initialization, event: Event::collective_Instance1(RawEvent::Executed( hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), false, diff --git a/frame/contracts/COMPLEXITY.md b/frame/contracts/COMPLEXITY.md index 1d8a122aa0039d3c86c069a89842db4e722ca418..7e8c2903c79b9348d2441b46e181a0c59a6562d0 100644 --- a/frame/contracts/COMPLEXITY.md +++ b/frame/contracts/COMPLEXITY.md @@ -166,7 +166,8 @@ This function performs the following steps: In the course of the execution this function can perform up to 2 DB reads to `get_balance` of source and destination accounts. It can also induce up to 2 DB writes via `set_balance` if flushed to the storage. -Moreover, if the source balance goes below `existential_deposit` then the account will be deleted along with all its storage which requires time proportional to the number of storage entries of that account. +Moreover, if the source balance goes below `existential_deposit` then the transfer is denied and +returns with an error. Assuming marshaled size of a balance value is of the constant size we can neglect its effect on the performance. 
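The denied-transfer behaviour described above corresponds to the existential-deposit guard added to `transfer` in `frame/contracts/src/exec.rs` further down in this diff: only a `TransferCause::Terminate` transfer may drain the sender completely; every other cause must leave at least the existential deposit behind. A minimal standalone sketch of that rule, with balances simplified to `u128` and the deposit error string taken from the diff:

```rust
// Simplified sketch: balances as plain `u128`; the real check goes through
// `T::Currency::ensure_can_withdraw` inside `transfer`.
enum TransferCause {
	Call,
	Instantiate,
	Terminate,
}

fn check_transfer(
	cause: TransferCause,
	from_balance: u128,
	value: u128,
	existential_deposit: u128,
) -> Result<u128, &'static str> {
	let new_from_balance = from_balance
		.checked_sub(value)
		.ok_or("balance too low to send value")?;
	// Only ext_terminate is allowed to bring the sender below the existential deposit.
	let required_balance = match cause {
		TransferCause::Terminate => 0,
		_ => existential_deposit,
	};
	if new_from_balance < required_balance {
		return Err("brings sender below existential deposit");
	}
	Ok(new_from_balance)
}

fn main() {
	// A plain call or instantiation must keep the existential deposit (15 here).
	assert!(check_transfer(TransferCause::Call, 100, 90, 15).is_err());
	assert!(check_transfer(TransferCause::Instantiate, 20, 10, 15).is_err());
	// Termination may move the entire balance out to the beneficiary.
	assert_eq!(check_transfer(TransferCause::Terminate, 100, 100, 15), Ok(0));
}
```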
@@ -187,6 +188,23 @@ implementation they just involve a DB read. For subsequent calls and instantiations during contract execution, the initialization requires no expensive operations. +## Terminate + +This function performs the following steps: + +1. Check that the calling contract is not already on the call stack by calling `is_live`. +2. `transfer` funds from the caller to the beneficiary. +3. Flag the caller contract as deleted in the overlay. + +`is_live` does not do any database access nor does it allocate memory. It walks up the call +stack and therefore executes in time linear in the size of the call stack. Because +the call stack has a fixed maximum size we consider this operation to be constant time. + +**complexity**: Database accesses as described in Transfer + Removal of the contract. Currently, +we are using child trie removal, which is linear in the number of stored keys. Upcoming changes +will make the account removal constant time. + + ## Call This function receives input data for the contract execution. The execution consists of the following steps: @@ -350,6 +368,20 @@ Loading `init_code` and `input_data` should be charged in any case. **complexity**: All complexity comes from loading buffers and executing `instantiate` executive function. The former component is proportional to the sizes of `init_code`, `value` and `input_data` buffers. The latter component completely depends on the complexity of `instantiate` executive function and also dominated by it. +## ext_terminate + +This function receives the following arguments: + +- `beneficiary`, a buffer containing a marshaled `AccountId` + +It consists of the following steps: + +1. Loading the `beneficiary` buffer from the sandbox memory (see sandboxing memory get) and then decoding it. + +Loading of the `beneficiary` buffer should be charged. This is because the sizes of buffers are specified by the calling code, even though marshaled representations are, essentially, of constant size. This can be fixed by assigning an upper bound for the size of `AccountId`. + +**complexity**: All complexity comes from loading buffers and executing the `terminate` executive function. The former component is proportional to the size of the `beneficiary` buffer. The latter component depends entirely on the complexity of the `terminate` executive function and is dominated by it. + ## ext_return This function receives a `data` buffer as an argument. Execution of the function consists of the following steps: @@ -440,3 +472,27 @@ function performs a DB read. This function serializes the current block's number into the scratch buffer. **complexity**: Assuming that the block number is of constant size, this function has constant complexity. + +## Built-in hashing functions + +This section concerns the following supported built-in hash functions: + +- `SHA2` with 256-bit width +- `KECCAK` with 256-bit width +- `BLAKE2` with 128-bit and 256-bit widths + +These functions compute a cryptographic hash over the given input and copy the +resulting hash directly back into the sandboxed Wasm contract output buffer. + +Execution of the function consists of the following steps: + +1. Load the data stored in the input buffer into an intermediate buffer. +2. Compute the cryptographic hash `H` of the intermediate buffer. +3. Copy the bytes of `H` back into the contract-side output buffer. + +**complexity**: Complexity is proportional to the size of the input buffer in bytes +as well as to the size of the output buffer in bytes.
Also different cryptographic +algorithms have different inherent complexity so users must expect the above +mentioned crypto hashes to have varying gas costs. +The complexity of each cryptographic hash function highly depends on the underlying +implementation. diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 86f39f8a82b94d65e2da3e8535174d2ecde4ea77..a9318002cee2a71d290b56687f7123d5a1f994d7 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,25 +11,25 @@ description = "FRAME pallet for WASM contracts" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } pwasm-utils = { version = "0.12.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } parity-wasm = { version = "0.41.0", default-features = false } wasmi-validation = { version = "0.3.0", default-features = false } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-sandbox = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/sandbox" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "2.0.0-alpha.2", default-features = false, path = "common" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-sandbox = { version = "0.8.0-alpha.5", default-features = false, path = "../../primitives/sandbox" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +pallet-contracts-primitives = { version = "2.0.0-alpha.5", default-features = false, path = "common" } [dev-dependencies] wabt = "0.9.2" assert_matches = "1.3.0" hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } -pallet-timestamp = { version = "2.0.0-alpha.2", path = "../timestamp" } -pallet-randomness-collective-flip = { version = "2.0.0-alpha.2", path = "../randomness-collective-flip" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } +pallet-timestamp = { version = "2.0.0-alpha.5", path = "../timestamp" } +pallet-randomness-collective-flip = { version = "2.0.0-alpha.5", path = "../randomness-collective-flip" } [features] default = ["std"] @@ -48,3 +48,6 @@ std = [ "wasmi-validation/std", "pallet-contracts-primitives/std", ] + +[package.metadata.docs.rs] 
+targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index 0d009e7c82b41285fe12abdbe68ac07343c80985..d181896bd2a6c2bf4d25524e7fec277823f3cd02 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-primitives" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,9 +10,9 @@ description = "A crate that hosts a common definitions that are relevant for the [dependencies] # This crate should not rely on any of the frame primitives. -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] @@ -21,3 +21,6 @@ std = [ "sp-runtime/std", "sp-std/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index 092d049c5414ba7964860732fd8f8b3e2e3022a1..fa13d8ec3fe4a3fbca2f89750772a3f3a8f91251 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,18 +9,21 @@ repository = "https://github.com/paritytech/substrate/" description = "Node-specific RPC methods for interaction with contracts." 
[dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0" } +codec = { package = "parity-scale-codec", version = "1.3.0" } jsonrpc-core = "14.0.3" -jsonrpc-core-client = "14.0.3" +jsonrpc-core-client = "14.0.5" jsonrpc-derive = "14.0.3" -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0-alpha.2", path = "../../../primitives/rpc" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0-alpha.5", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -pallet-contracts-primitives = { version = "2.0.0-alpha.2", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0-alpha.2", path = "./runtime-api" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +pallet-contracts-primitives = { version = "2.0.0-alpha.5", path = "../common" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0-alpha.5", path = "./runtime-api" } [dev-dependencies] serde_json = "1.0.41" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index 8435a6c4238c9e07d60c79a49aed9ae282993a76..692bd3f25ef85b3c695240897cc55bb0546eddcd 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,11 +9,11 @@ repository = "https://github.com/paritytech/substrate/" description = "Runtime API definition required by Contracts RPC extensions." 
[dependencies] -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/runtime" } -pallet-contracts-primitives = { version = "2.0.0-alpha.2", default-features = false, path = "../../common" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../../primitives/runtime" } +pallet-contracts-primitives = { version = "2.0.0-alpha.5", default-features = false, path = "../../common" } [features] default = ["std"] @@ -24,3 +24,6 @@ std = [ "sp-runtime/std", "pallet-contracts-primitives/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 374c55c374d7bf26c0603017f3838b9c0a6a0bb7..165581e67646df41afa60945e5baefd6e62feb30 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -146,16 +146,8 @@ impl AccountDb for DirectAccountDb { let mut total_imbalance = SignedImbalance::zero(); for (address, changed) in s.into_iter() { if let Some(balance) = changed.balance() { - let existed = !T::Currency::total_balance(&address).is_zero(); let imbalance = T::Currency::make_free_balance_be(&address, balance); - let exists = !T::Currency::total_balance(&address).is_zero(); total_imbalance = total_imbalance.merge(imbalance); - if existed && !exists { - // Account killed. This will ultimately lead to calling `OnKilledAccount` callback - // which will make removal of CodeHashOf and AccountStorage for this account. - // In order to avoid writing over the deleted properties we `continue` here. - continue; - } } if changed.code_hash().is_some() diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 19d1d52497f231399497dd608a39800c0bb0d059..402622331d0ecc33ae66a8920b0568aaad099dd2 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -128,6 +128,13 @@ pub trait Ext { gas_meter: &mut GasMeter, ) -> Result<(), DispatchError>; + /// Transfer all funds to `beneficiary` and delete the contract. + fn terminate( + &mut self, + beneficiary: &AccountIdOf, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError>; + /// Call (possibly transferring some amount of funds) into the specified account. fn call( &mut self, @@ -282,7 +289,7 @@ pub enum DeferredAction { } pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { - pub parent: Option<&'a ExecutionContext<'a, T, V, L>>, + pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, pub self_account: T::AccountId, pub self_trie_id: Option, pub overlay: OverlayAccountDb<'a, T>, @@ -307,7 +314,7 @@ where /// account (not a contract). 
pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { ExecutionContext { - parent: None, + caller: None, self_trie_id: None, self_account: origin, overlay: OverlayAccountDb::::new(&DirectAccountDb), @@ -325,7 +332,7 @@ where -> ExecutionContext<'b, T, V, L> { ExecutionContext { - parent: Some(self), + caller: Some(self), self_trie_id: trie_id, self_account: dest, overlay: OverlayAccountDb::new(&self.overlay), @@ -428,20 +435,6 @@ where gas_meter, )?; - // Destroy contract if insufficient remaining balance. - if nested.overlay.get_balance(&dest) < nested.config.existential_deposit { - let parent = nested.parent - .expect("a nested execution context must have a parent; qed"); - if parent.is_live(&dest) { - return Err(ExecError { - reason: "contract cannot be destroyed during recursive execution".into(), - buffer: output.data, - }); - } - - nested.overlay.destroy_contract(&dest); - } - Ok(output) } None => Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }), @@ -535,9 +528,37 @@ where Ok((dest, output)) } - fn new_call_context<'b>(&'b mut self, caller: T::AccountId, value: BalanceOf) - -> CallContext<'b, 'a, T, V, L> - { + pub fn terminate( + &mut self, + beneficiary: &T::AccountId, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + let self_id = self.self_account.clone(); + let value = self.overlay.get_balance(&self_id); + if let Some(caller) = self.caller { + if caller.is_live(&self_id) { + return Err(DispatchError::Other( + "Cannot terminate a contract that is present on the call stack", + )); + } + } + transfer( + gas_meter, + TransferCause::Terminate, + &self_id, + beneficiary, + value, + self, + )?; + self.overlay.destroy_contract(&self_id); + Ok(()) + } + + fn new_call_context<'b>( + &'b mut self, + caller: T::AccountId, + value: BalanceOf, + ) -> CallContext<'b, 'a, T, V, L> { let timestamp = self.timestamp.clone(); let block_number = self.block_number.clone(); CallContext { @@ -571,7 +592,7 @@ where /// stack, meaning it is in the middle of an execution. fn is_live(&self, account: &T::AccountId) -> bool { &self.self_account == account || - self.parent.map_or(false, |parent| parent.is_live(account)) + self.caller.map_or(false, |caller| caller.is_live(account)) } } @@ -606,6 +627,7 @@ impl Token for TransferFeeToken> { enum TransferCause { Call, Instantiate, + Terminate, } /// Transfer some funds from `transactor` to `dest`. @@ -642,7 +664,7 @@ fn transfer<'a, T: Trait, V: Vm, L: Loader>( Instantiate => ContractInstantiate, // Otherwise the fee is to transfer to an account. - Call => TransferFeeKind::Transfer, + Call | Terminate => TransferFeeKind::Transfer, }; TransferFeeToken { kind, @@ -664,11 +686,19 @@ fn transfer<'a, T: Trait, V: Vm, L: Loader>( if to_balance.is_zero() && value < ctx.config.existential_deposit { Err("value too low to create account")? 
} + + // Only ext_terminate is allowed to bring the sender below the existential deposit + let required_balance = match cause { + Terminate => 0.into(), + _ => ctx.config.existential_deposit + }; + T::Currency::ensure_can_withdraw( transactor, value, WithdrawReason::Transfer.into(), - new_from_balance, + new_from_balance.checked_sub(&required_balance) + .ok_or("brings sender below existential deposit")?, )?; let new_to_balance = match to_balance.checked_add(&value) { @@ -740,6 +770,14 @@ where self.ctx.transfer(to.clone(), value, gas_meter) } + fn terminate( + &mut self, + beneficiary: &AccountIdOf, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + self.ctx.terminate(beneficiary, gas_meter) + } + fn call( &mut self, to: &T::AccountId, @@ -1321,7 +1359,7 @@ mod tests { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1); + ctx.overlay.set_balance(&ALICE, 100); let result = ctx.instantiate( 1, @@ -1591,6 +1629,7 @@ mod tests { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); ctx.overlay.set_balance(&ALICE, 1000); + ctx.overlay.set_balance(&BOB, 100); ctx.overlay.instantiate_contract(&BOB, instantiator_ch).unwrap(); assert_matches!( @@ -1650,6 +1689,7 @@ mod tests { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); ctx.overlay.set_balance(&ALICE, 1000); + ctx.overlay.set_balance(&BOB, 100); ctx.overlay.instantiate_contract(&BOB, instantiator_ch).unwrap(); assert_matches!( @@ -1668,6 +1708,45 @@ mod tests { }); } + #[test] + fn termination_from_instantiate_fails() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + + let terminate_ch = loader.insert(|mut ctx| { + ctx.ext.terminate(&ALICE, &mut ctx.gas_meter).unwrap(); + exec_success() + }); + + ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_balance(&ALICE, 1000); + + assert_matches!( + ctx.instantiate( + 100, + &mut GasMeter::::with_limit(10000, 1), + &terminate_ch, + vec![], + ), + Err(ExecError { + reason: DispatchError::Other("insufficient remaining balance"), + buffer + }) if buffer == Vec::::new() + ); + + assert_eq!( + &ctx.events(), + &[] + ); + }); + } + #[test] fn rent_allowance() { let vm = MockVm::new(); @@ -1683,7 +1762,7 @@ mod tests { let cfg = Config::preload(); let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1); + ctx.overlay.set_balance(&ALICE, 100); let result = ctx.instantiate( 1, diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index e1022859a1956273b222182ddf2f3356fb30cd47..10938bb7debc15948c7c6af6a30db74c3dba429c 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -98,7 +98,6 @@ mod rent; #[cfg(test)] mod tests; -mod migration; use crate::exec::ExecutionContext; use crate::account_db::{AccountDb, DirectAccountDb}; @@ -114,7 +113,10 @@ use sp_std::{prelude::*, marker::PhantomData, fmt::Debug}; use codec::{Codec, Encode, Decode}; use sp_io::hashing::blake2_256; use sp_runtime::{ - traits::{Hash, StaticLookup, Zero, MaybeSerializeDeserialize, Member, SignedExtension}, + traits::{ + Hash, StaticLookup, Zero, MaybeSerializeDeserialize, Member, SignedExtension, + DispatchInfoOf, + }, transaction_validity::{ ValidTransaction, InvalidTransaction, 
TransactionValidity, TransactionValidityError, }, @@ -124,9 +126,8 @@ use frame_support::dispatch::{DispatchResult, Dispatchable}; use frame_support::{ Parameter, decl_module, decl_event, decl_storage, decl_error, storage::child, parameter_types, IsSubType, - weights::DispatchInfo, }; -use frame_support::traits::{OnKilledAccount, OnUnbalanced, Currency, Get, Time, Randomness}; +use frame_support::traits::{OnUnbalanced, Currency, Get, Time, Randomness}; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; use sp_core::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX; use pallet_contracts_primitives::{RentProjection, ContractAccessError}; @@ -549,6 +550,7 @@ decl_module! { /// Updates the schedule for metering contracts. /// /// The schedule must have a greater version than the stored schedule. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn update_schedule(origin, schedule: Schedule) -> DispatchResult { ensure_root(origin)?; if >::current_schedule().version >= schedule.version { @@ -563,6 +565,7 @@ decl_module! { /// Stores the given binary Wasm code into the chain's storage and returns its `codehash`. /// You can instantiate contracts only with stored code. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn put_code( origin, #[compact] gas_limit: Gas, @@ -590,6 +593,7 @@ decl_module! { /// * If the account is a regular account, any value will be transferred. /// * If no account exists and the call value is not less than `existential_deposit`, /// a regular account will be created and any value will be transferred. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn call( origin, dest: ::Source, @@ -615,6 +619,7 @@ decl_module! { /// after the execution is saved as the `code` of the account. That code will be invoked /// upon any call received by this account. /// - The contract is initialized. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn instantiate( origin, #[compact] endowment: BalanceOf, @@ -637,6 +642,7 @@ decl_module! { /// /// If contract is not evicted as a result of this call, no actions are taken and /// the sender is not eligible for the reward. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn claim_surcharge(origin, dest: T::AccountId, aux_sender: Option) { let origin = origin.into(); let (signed, rewarded) = match (origin, aux_sender) { @@ -667,10 +673,6 @@ decl_module! { fn on_finalize() { GasSpent::kill(); } - - fn on_runtime_upgrade() { - migration::on_runtime_upgrade::() - } } } @@ -934,30 +936,18 @@ decl_storage! { /// Current cost schedule for contracts. CurrentSchedule get(fn current_schedule) config(): Schedule = Schedule::default(); /// A mapping from an original code hash to the original code, untouched by instrumentation. - pub PristineCode: map hasher(blake2_256) CodeHash => Option>; + pub PristineCode: map hasher(identity) CodeHash => Option>; /// A mapping between an original code hash and instrumented wasm code, ready for execution. - pub CodeStorage: map hasher(blake2_256) CodeHash => Option; + pub CodeStorage: map hasher(identity) CodeHash => Option; /// The subtrie counter. pub AccountCounter: u64 = 0; /// The code associated with a given account. - pub ContractInfoOf: map hasher(blake2_256) T::AccountId => Option>; + pub ContractInfoOf: map hasher(twox_64_concat) T::AccountId => Option>; /// The price of one unit of gas. 
GasPrice get(fn gas_price) config(): BalanceOf = 1.into(); } } -// TODO: this should be removed in favour of a self-destruct contract host function allowing the -// contract to delete all storage and the `ContractInfoOf` key and transfer remaining balance to -// some other account. As it stands, it's an economic insecurity on any smart-contract chain. -// https://github.com/paritytech/substrate/issues/4952 -impl OnKilledAccount for Module { - fn on_killed_account(who: &T::AccountId) { - if let Some(ContractInfo::Alive(info)) = >::take(who) { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); - } - } -} - /// In-memory cache of configuration values. /// /// We assume that these values can't be changed in the @@ -1103,7 +1093,6 @@ impl SignedExtension for CheckBlockGasLimit { type AccountId = T::AccountId; type Call = ::Call; type AdditionalSigned = (); - type DispatchInfo = DispatchInfo; type Pre = (); fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } @@ -1112,7 +1101,7 @@ impl SignedExtension for CheckBlockGasLimit { &self, _: &Self::AccountId, call: &Self::Call, - _: Self::DispatchInfo, + _: &DispatchInfoOf, _: usize, ) -> TransactionValidity { let call = match call.is_sub_type() { diff --git a/frame/contracts/src/migration.rs b/frame/contracts/src/migration.rs deleted file mode 100644 index 83d3771c83dbb067d00e378571369b739608c4c4..0000000000000000000000000000000000000000 --- a/frame/contracts/src/migration.rs +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Migration code to update storage. - -use super::*; -use frame_support::storage::migration::{put_storage_value, take_storage_value, StorageIterator}; - -pub fn on_runtime_upgrade() { - change_name_contract_to_contracts::() -} - -// Change the storage name used by this pallet from `Contract` to `Contracts`. -// -// Since the format of the storage items themselves have not changed, we do not -// need to keep track of a storage version. If the runtime does not need to be -// upgraded, nothing here will happen anyway. 
- -fn change_name_contract_to_contracts() { - sp_runtime::print("Migrating Contracts."); - - if let Some(gas_spent) = take_storage_value::(b"Contract", b"GasSpent", &[]) { - put_storage_value(b"Contracts", b"GasSpent", &[], gas_spent); - } - - if let Some(current_schedule) = take_storage_value::(b"Contract", b"CurrentSchedule", &[]) { - put_storage_value(b"Contracts", b"CurrentSchedule", &[], current_schedule); - } - - for (hash, pristine_code) in StorageIterator::>::new(b"Contract", b"PristineCode").drain() { - put_storage_value(b"Contracts", b"PristineCode", &hash, pristine_code); - } - - for (hash, code_storage) in StorageIterator::::new(b"Contract", b"CodeStorage").drain() { - put_storage_value(b"Contracts", b"CodeStorage", &hash, code_storage); - } - - if let Some(current_schedule) = take_storage_value::(b"Contract", b"AccountCounter", &[]) { - put_storage_value(b"Contracts", b"AccountCounter", &[], current_schedule); - } - - for (hash, contract_info_of) in StorageIterator::>::new(b"Contract", b"ContractInfoOf").drain() { - put_storage_value(b"Contracts", b"ContractInfoOf", &hash, contract_info_of); - } - - if let Some(get_price) = take_storage_value::>(b"Contract", b"GetPrice", &[]) { - put_storage_value(b"Contracts", b"GetPrice", &[], get_price); - } -} diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 1b85e32e7ed2cfb06381a5cf908704f568ce9f92..1a5aa08454d14d08d527766a9672a0688e85fe3c 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -117,7 +117,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnKilledAccount = Contracts; + type OnKilledAccount = (); } impl pallet_balances::Trait for Test { type Balance = u64; @@ -279,7 +279,9 @@ impl ExtBuilder { }, gas_price: self.gas_price, }.assimilate_storage(&mut t).unwrap(); - sp_io::TestExternalities::new(t) + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext } } @@ -308,7 +310,7 @@ fn refunds_unused_gas() { } #[test] -fn account_removal_removes_storage() { +fn account_removal_does_not_remove_storage() { ExtBuilder::default().existential_deposit(100).build().execute_with(|| { let trie_id1 = ::TrieIdGenerator::trie_id(&1); let trie_id2 = ::TrieIdGenerator::trie_id(&2); @@ -351,14 +353,22 @@ fn account_removal_removes_storage() { // Transfer funds from account 1 of such amount that after this transfer // the balance of account 1 will be below the existential threshold. // - // This should lead to the removal of all storage associated with this account. + // This does not remove the contract storage as we are not notified about an + // account removal. This cannot happen in reality because a contract can only + // remove itself by `ext_terminate`. There is no external event that can remove + // the account apart from that. assert_ok!(Balances::transfer(Origin::signed(1), 2, 20)); - // Verify that all entries from account 1 is removed, while - // entries from account 2 is in place. + // Verify that no entries are removed.
{ - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key1).is_none()); - assert!(>::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key2).is_none()); + assert_eq!( + >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key1), + Some(b"1".to_vec()) + ); + assert_eq!( + >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key2), + Some(b"2".to_vec()) + ); assert_eq!( >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key1), @@ -372,39 +382,10 @@ fn account_removal_removes_storage() { }); } -const CODE_RETURN_FROM_START_FN: &str = r#" -(module - (import "env" "ext_return" (func $ext_return (param i32 i32))) - (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) - (import "env" "memory" (memory 1 1)) - - (start $start) - (func $start - (call $ext_deposit_event - (i32.const 0) ;; The topics buffer - (i32.const 0) ;; The topics buffer's length - (i32.const 8) ;; The data buffer - (i32.const 4) ;; The data buffer's length - ) - (call $ext_return - (i32.const 8) - (i32.const 4) - ) - (unreachable) - ) - - (func (export "call") - (unreachable) - ) - (func (export "deploy")) - - (data (i32.const 8) "\01\02\03\04") -) -"#; - #[test] fn instantiate_and_call_and_deposit_event() { - let (wasm, code_hash) = compile_module::(CODE_RETURN_FROM_START_FN).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("return_from_start_fn.wat")) + .unwrap(); ExtBuilder::default().existential_deposit(100).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); @@ -422,44 +403,44 @@ fn instantiate_and_call_and_deposit_event() { assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances( pallet_balances::RawEvent::Endowed(BOB, 100) ), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::ContractExecution(BOB, vec![1, 2, 3, 4])), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), topics: vec![], } @@ -470,23 +451,6 @@ fn instantiate_and_call_and_deposit_event() { }); } -const CODE_DISPATCH_CALL: &str = r#" -(module - (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func (export "call") - (call $ext_dispatch_call - (i32.const 8) ;; Pointer to the start of encoded call buffer - (i32.const 11) ;; Length of the buffer - ) - ) - (func (export "deploy")) - - (data (i32.const 8) "\00\00\03\00\00\00\00\00\00\00\C8") -) -"#; - #[test] fn dispatch_call() { // This test can 
fail due to the encoding changes. In case it becomes too annoying @@ -494,7 +458,8 @@ fn dispatch_call() { let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - let (wasm, code_hash) = compile_module::(CODE_DISPATCH_CALL).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("dispatch_call.wat")) + .unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); @@ -505,17 +470,17 @@ fn dispatch_call() { // wasm source this test will fail and will show you the actual hash. assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), topics: vec![], }, @@ -539,58 +504,58 @@ fn dispatch_call() { assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances( pallet_balances::RawEvent::Endowed(BOB, 100) ), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), topics: vec![], }, // Dispatching the call. EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(CHARLIE)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances( pallet_balances::RawEvent::Endowed(CHARLIE, 50) ), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances( pallet_balances::RawEvent::Transfer(BOB, CHARLIE, 50) ), @@ -599,7 +564,7 @@ fn dispatch_call() { // Event emitted as a result of dispatch. 
EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Dispatched(BOB, true)), topics: vec![], } @@ -607,24 +572,6 @@ fn dispatch_call() { }); } -const CODE_DISPATCH_CALL_THEN_TRAP: &str = r#" -(module - (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func (export "call") - (call $ext_dispatch_call - (i32.const 8) ;; Pointer to the start of encoded call buffer - (i32.const 11) ;; Length of the buffer - ) - (unreachable) ;; trap so that the top level transaction fails - ) - (func (export "deploy")) - - (data (i32.const 8) "\00\00\03\00\00\00\00\00\00\00\C8") -) -"#; - #[test] fn dispatch_call_not_dispatched_after_top_level_transaction_failure() { // This test can fail due to the encoding changes. In case it becomes too annoying @@ -632,7 +579,8 @@ fn dispatch_call_not_dispatched_after_top_level_transaction_failure() { let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - let (wasm, code_hash) = compile_module::(CODE_DISPATCH_CALL_THEN_TRAP).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("dispatch_call_then_trap.wat")) + .unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); @@ -643,17 +591,17 @@ fn dispatch_call_not_dispatched_after_top_level_transaction_failure() { // wasm source this test will fail and will show you the actual hash. assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), topics: vec![], }, @@ -681,39 +629,39 @@ fn dispatch_call_not_dispatched_after_top_level_transaction_failure() { ); assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances( pallet_balances::RawEvent::Endowed(BOB, 100) ), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), topics: vec![], }, @@ -722,19 +670,10 @@ fn dispatch_call_not_dispatched_after_top_level_transaction_failure() { }); } 
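// The rewritten tests below load their wasm fixtures through a `load_wasm`
// helper (e.g. `load_wasm("run_out_of_gas.wat")`) whose definition is not
// shown in this hunk. A minimal sketch of what such a helper could look like,
// assuming the .wat files live in a `fixtures/` directory of the crate; the
// directory name and error handling are illustrative assumptions, not the
// patch's actual implementation.
fn load_wasm(file_name: &str) -> String {
	use std::{fs, path::PathBuf};
	// Resolve the fixture relative to the crate root known at compile time.
	let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
	path.push("fixtures");
	path.push(file_name);
	fs::read_to_string(&path)
		.unwrap_or_else(|_| panic!("could not read wat fixture {}", path.display()))
}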
-const CODE_RUN_OUT_OF_GAS: &str = r#" -(module - (func (export "call") - (loop $inf (br $inf)) ;; just run out of gas - (unreachable) - ) - (func (export "deploy")) -) -"#; - #[test] fn run_out_of_gas() { - let (wasm, code_hash) = compile_module::(CODE_RUN_OUT_OF_GAS).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("run_out_of_gas.wat")) + .unwrap(); ExtBuilder::default() .existential_deposit(50) @@ -767,110 +706,6 @@ fn run_out_of_gas() { }); } -const CODE_SET_RENT: &str = r#" -(module - (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) - (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_clear_storage" (func $ext_clear_storage (param i32))) - (import "env" "ext_set_rent_allowance" (func $ext_set_rent_allowance (param i32 i32))) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; insert a value of 4 bytes into storage - (func $call_0 - (call $ext_set_storage - (i32.const 1) - (i32.const 0) - (i32.const 4) - ) - ) - - ;; remove the value inserted by call_1 - (func $call_1 - (call $ext_clear_storage - (i32.const 1) - ) - ) - - ;; transfer 50 to ALICE - (func $call_2 - (call $ext_dispatch_call - (i32.const 68) - (i32.const 11) - ) - ) - - ;; do nothing - (func $call_else) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - ;; Dispatch the call according to input size - (func (export "call") - (local $input_size i32) - (set_local $input_size - (call $ext_scratch_size) - ) - (block $IF_ELSE - (block $IF_2 - (block $IF_1 - (block $IF_0 - (br_table $IF_0 $IF_1 $IF_2 $IF_ELSE - (get_local $input_size) - ) - (unreachable) - ) - (call $call_0) - return - ) - (call $call_1) - return - ) - (call $call_2) - return - ) - (call $call_else) - ) - - ;; Set into storage a 4 bytes value - ;; Set call set_rent_allowance with input - (func (export "deploy") - (local $input_size i32) - (set_local $input_size - (call $ext_scratch_size) - ) - (call $ext_set_storage - (i32.const 0) - (i32.const 0) - (i32.const 4) - ) - (call $ext_scratch_read - (i32.const 0) - (i32.const 0) - (get_local $input_size) - ) - (call $ext_set_rent_allowance - (i32.const 0) - (get_local $input_size) - ) - ) - - ;; Encoding of 10 in balance - (data (i32.const 0) "\28") - - ;; Encoding of call transfer 50 to CHARLIE - (data (i32.const 68) "\00\00\03\00\00\00\00\00\00\00\C8") -) -"#; - /// Input data for each call in set_rent code mod call { pub fn set_storage_4_byte() -> Vec { vec![] } @@ -888,7 +723,7 @@ fn test_set_rent_code_and_hash() { let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - let (wasm, code_hash) = compile_module::(CODE_SET_RENT).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); @@ -898,17 +733,17 @@ fn test_set_rent_code_and_hash() { // and will show you the actual hash. 
assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), topics: vec![], }, @@ -918,7 +753,7 @@ fn test_set_rent_code_and_hash() { #[test] fn storage_size() { - let (wasm, code_hash) = compile_module::(CODE_SET_RENT).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); // Storage size ExtBuilder::default().existential_deposit(50).build().execute_with(|| { @@ -956,7 +791,7 @@ fn initialize_block(number: u64) { #[test] fn deduct_blocks() { - let (wasm, code_hash) = compile_module::(CODE_SET_RENT).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create @@ -1050,7 +885,7 @@ fn claim_surcharge_malus() { /// Claim surcharge with the given trigger_call at the given blocks. /// If `removes` is true then assert that the contract is a tombstone. fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) { - let (wasm, code_hash) = compile_module::(CODE_SET_RENT).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create @@ -1082,7 +917,7 @@ fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) /// * if allowance is exceeded /// * if balance is reached and balance < subsistence threshold fn removals(trigger_call: impl Fn() -> bool) { - let (wasm, code_hash) = compile_module::(CODE_SET_RENT).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); // Balance reached and superior to subsistence threshold ExtBuilder::default().existential_deposit(50).build().execute_with(|| { @@ -1197,7 +1032,7 @@ fn removals(trigger_call: impl Fn() -> bool) { #[test] fn call_removed_contract() { - let (wasm, code_hash) = compile_module::(CODE_SET_RENT).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); // Balance reached and superior to subsistence threshold ExtBuilder::default().existential_deposit(50).build().execute_with(|| { @@ -1225,7 +1060,7 @@ fn call_removed_contract() { // Calling a contract that is about to evict shall emit an event. assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), topics: vec![], }, @@ -1239,59 +1074,10 @@ fn call_removed_contract() { }) } -const CODE_CHECK_DEFAULT_RENT_ALLOWANCE: &str = r#" -(module - (import "env" "ext_rent_allowance" (func $ext_rent_allowance)) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "call")) - - (func (export "deploy") - ;; fill the scratch buffer with the rent allowance. 
- (call $ext_rent_allowance) - - ;; assert $ext_scratch_size == 8 - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 8) - ) - ) - - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - - ;; assert that contents of the buffer is equal to >::max_value(). - (call $assert - (i64.eq - (i64.load - (i32.const 8) - ) - (i64.const 0xFFFFFFFFFFFFFFFF) - ) - ) - ) -) -"#; - #[test] fn default_rent_allowance_on_instantiate() { - let (wasm, code_hash) = compile_module::(CODE_CHECK_DEFAULT_RENT_ALLOWANCE).unwrap(); + let (wasm, code_hash) = compile_module::( + &load_wasm("check_default_rent_allowance.wat")).unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create @@ -1321,65 +1107,6 @@ fn default_rent_allowance_on_instantiate() { }); } -const CODE_RESTORATION: &str = r#" -(module - (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_restore_to" (func $ext_restore_to (param i32 i32 i32 i32 i32 i32 i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func (export "call") - (call $ext_restore_to - ;; Pointer and length of the encoded dest buffer. - (i32.const 256) - (i32.const 8) - ;; Pointer and length of the encoded code hash buffer - (i32.const 264) - (i32.const 32) - ;; Pointer and length of the encoded rent_allowance buffer - (i32.const 296) - (i32.const 8) - ;; Pointer and number of items in the delta buffer. - ;; This buffer specifies multiple keys for removal before restoration. - (i32.const 100) - (i32.const 1) - ) - ) - (func (export "deploy") - ;; Data to restore - (call $ext_set_storage - (i32.const 0) - (i32.const 0) - (i32.const 4) - ) - - ;; ACL - (call $ext_set_storage - (i32.const 100) - (i32.const 0) - (i32.const 4) - ) - ) - - ;; Data to restore - (data (i32.const 0) "\28") - - ;; Buffer that has ACL storage keys. - (data (i32.const 100) "\01") - - ;; Address of bob - (data (i32.const 256) "\02\00\00\00\00\00\00\00") - - ;; Code hash of SET_RENT - (data (i32.const 264) - "\c2\1c\41\10\a5\22\d8\59\1c\4c\77\35\dd\2d\bf\a1" - "\13\0b\50\93\76\9b\92\31\97\b7\c5\74\26\aa\38\2a" - ) - - ;; Rent allowance - (data (i32.const 296) "\32\00\00\00\00\00\00\00") -) -"#; - #[test] fn restorations_dirty_storage_and_different_storage() { restoration(true, true); @@ -1401,9 +1128,10 @@ fn restoration_success() { } fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: bool) { - let (set_rent_wasm, set_rent_code_hash) = compile_module::(CODE_SET_RENT).unwrap(); + let (set_rent_wasm, set_rent_code_hash) = + compile_module::(&load_wasm("set_rent.wat")).unwrap(); let (restoration_wasm, restoration_code_hash) = - compile_module::(CODE_RESTORATION).unwrap(); + compile_module::(&load_wasm("restoration.wat")).unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); @@ -1414,22 +1142,22 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: // and will show you the actual hash. 
assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::CodeStored(restoration_code_hash.into())), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::CodeStored(set_rent_code_hash.into())), topics: vec![], }, @@ -1473,7 +1201,7 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts( RawEvent::Evicted(BOB.clone(), true) ), @@ -1526,7 +1254,7 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: (true, false) => { assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts( RawEvent::Restored(DJANGO, BOB, bob_code_hash, 50, false) ), @@ -1537,42 +1265,42 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: (_, true) => { assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(CHARLIE)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(CHARLIE, 1_000_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(frame_system::RawEvent::NewAccount(DJANGO)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(DJANGO, 30_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Transfer(CHARLIE, DJANGO, 30_000)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Instantiated(CHARLIE, DJANGO)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts(RawEvent::Restored( DJANGO, BOB, @@ -1599,12 +1327,12 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: assert!(ContractInfoOf::::get(DJANGO).is_none()); assert_eq!(System::events(), vec![ EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::system(system::RawEvent::KilledAccount(DJANGO)), topics: vec![], }, EventRecord { - phase: Phase::ApplyExtrinsic(0), + phase: Phase::Initialization, event: MetaEvent::contracts( RawEvent::Restored(DJANGO, BOB, bob_contract.code_hash, 50, true) ), @@ -1615,72 +1343,10 @@ fn restoration(test_different_storage: bool, 
test_restore_to_with_dirty_storage: }); } -const CODE_STORAGE_SIZE: &str = r#" -(module - (import "env" "ext_get_storage" (func $ext_get_storage (param i32) (result i32))) - (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "memory" (memory 16 16)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "call") - ;; assert $ext_scratch_size == 8 - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 4) - ) - ) - - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 32) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 4) ;; Count of bytes to copy. - ) - - ;; place a garbage value in storage, the size of which is specified by the call input. - (call $ext_set_storage - (i32.const 0) ;; Pointer to storage key - (i32.const 0) ;; Pointer to value - (i32.load (i32.const 32)) ;; Size of value - ) - - (call $assert - (i32.eq - (call $ext_get_storage - (i32.const 0) ;; Pointer to storage key - ) - (i32.const 0) - ) - ) - - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.load (i32.const 32)) - ) - ) - ) - - (func (export "deploy")) - - (data (i32.const 0) "\01") ;; Storage key (32 B) -) -"#; - #[test] fn storage_max_value_limit() { - let (wasm, code_hash) = compile_module::(CODE_STORAGE_SIZE).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("storage_size.wat")) + .unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create @@ -1721,330 +1387,12 @@ fn storage_max_value_limit() { }); } -const CODE_RETURN_WITH_DATA: &str = r#" -(module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - ;; Deploy routine is the same as call. - (func (export "deploy") (result i32) - (call $call) - ) - - ;; Call reads the first 4 bytes (LE) as the exit status and returns the rest as output data. - (func $call (export "call") (result i32) - (local $buf_size i32) - (local $exit_status i32) - - ;; Find out the size of the scratch buffer - (set_local $buf_size (call $ext_scratch_size)) - - ;; Copy scratch buffer into this contract memory. - (call $ext_scratch_read - (i32.const 0) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (get_local $buf_size) ;; Count of bytes to copy. - ) - - ;; Copy all but the first 4 bytes of the input data as the output data. - (call $ext_scratch_write - (i32.const 4) ;; Pointer to the data to return. - (i32.sub ;; Count of bytes to copy. - (get_local $buf_size) - (i32.const 4) - ) - ) - - ;; Return the first 4 bytes of the input data as the exit status. 
- (i32.load (i32.const 0)) - ) -) -"#; - -const CODE_CALLER_CONTRACT: &str = r#" -(module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_balance" (func $ext_balance)) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) - (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32) (result i32))) - (import "env" "ext_println" (func $ext_println (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func $current_balance (param $sp i32) (result i64) - (call $ext_balance) - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 8)) - ) - (call $ext_scratch_read - (i32.sub (get_local $sp) (i32.const 8)) - (i32.const 0) - (i32.const 8) - ) - (i64.load (i32.sub (get_local $sp) (i32.const 8))) - ) - - (func (export "deploy")) - - (func (export "call") - (local $sp i32) - (local $exit_code i32) - (local $balance i64) - - ;; Input data is the code hash of the contract to be deployed. - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 32) - ) - ) - - ;; Copy code hash from scratch buffer into this contract's memory. - (call $ext_scratch_read - (i32.const 24) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 32) ;; Count of bytes to copy. - ) - - ;; Read current balance into local variable. - (set_local $sp (i32.const 1024)) - (set_local $balance - (call $current_balance (get_local $sp)) - ) - - ;; Fail to deploy the contract since it returns a non-zero exit status. - (set_local $exit_code - (call $ext_instantiate - (i32.const 24) ;; Pointer to the code hash. - (i32.const 32) ;; Length of the code hash. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 9) ;; Pointer to input data buffer address - (i32.const 7) ;; Length of input data buffer - ) - ) - - ;; Check non-zero exit status. - (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x11)) - ) - - ;; Check that scratch buffer is empty since contract instantiation failed. - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 0)) - ) - - ;; Check that balance has not changed. - (call $assert - (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) - ) - - ;; Fail to deploy the contract due to insufficient gas. - (set_local $exit_code - (call $ext_instantiate - (i32.const 24) ;; Pointer to the code hash. - (i32.const 32) ;; Length of the code hash. - (i64.const 200) ;; How much gas to devote for the execution. - (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 8) ;; Pointer to input data buffer address - (i32.const 8) ;; Length of input data buffer - ) - ) - - ;; Check for special trap exit status. - (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x0100)) - ) - - ;; Check that scratch buffer is empty since contract instantiation failed. - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 0)) - ) - - ;; Check that balance has not changed. 
- (call $assert - (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) - ) - - ;; Deploy the contract successfully. - (set_local $exit_code - (call $ext_instantiate - (i32.const 24) ;; Pointer to the code hash. - (i32.const 32) ;; Length of the code hash. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 8) ;; Pointer to input data buffer address - (i32.const 8) ;; Length of input data buffer - ) - ) - - ;; Check for success exit status. - (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x00)) - ) - - ;; Check that scratch buffer contains the address of the new contract. - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 8)) - ) - - ;; Copy contract address from scratch buffer into this contract's memory. - (call $ext_scratch_read - (i32.const 16) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - - ;; Check that balance has been deducted. - (set_local $balance - (i64.sub (get_local $balance) (i64.load (i32.const 0))) - ) - (call $assert - (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) - ) - - ;; Call the new contract and expect it to return failing exit code. - (set_local $exit_code - (call $ext_call - (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 9) ;; Pointer to input data buffer address - (i32.const 7) ;; Length of input data buffer - ) - ) - - ;; Check non-zero exit status. - (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x11)) - ) - - ;; Check that scratch buffer contains the expected return data. - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 3)) - ) - (i32.store - (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 0) - ) - (call $ext_scratch_read - (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 0) - (i32.const 3) - ) - (call $assert - (i32.eq - (i32.load (i32.sub (get_local $sp) (i32.const 4))) - (i32.const 0x00776655) - ) - ) - - ;; Check that balance has not changed. - (call $assert - (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) - ) - - ;; Fail to call the contract due to insufficient gas. - (set_local $exit_code - (call $ext_call - (i32.const 16) ;; Pointer to "callee" address. - (i32.const 8) ;; Length of "callee" address. - (i64.const 100) ;; How much gas to devote for the execution. - (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 8) ;; Pointer to input data buffer address - (i32.const 8) ;; Length of input data buffer - ) - ) - - ;; Check for special trap exit status. - (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x0100)) - ) - - ;; Check that scratch buffer is empty since call trapped. - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 0)) - ) - - ;; Check that balance has not changed. - (call $assert - (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) - ) - - ;; Call the contract successfully. - (set_local $exit_code - (call $ext_call - (i32.const 16) ;; Pointer to "callee" address. 
- (i32.const 8) ;; Length of "callee" address. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 8) ;; Pointer to input data buffer address - (i32.const 8) ;; Length of input data buffer - ) - ) - - ;; Check for success exit status. - (call $assert - (i32.eq (get_local $exit_code) (i32.const 0x00)) - ) - - ;; Check that scratch buffer contains the expected return data. - (call $assert - (i32.eq (call $ext_scratch_size) (i32.const 4)) - ) - (i32.store - (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 0) - ) - (call $ext_scratch_read - (i32.sub (get_local $sp) (i32.const 4)) - (i32.const 0) - (i32.const 4) - ) - (call $assert - (i32.eq - (i32.load (i32.sub (get_local $sp) (i32.const 4))) - (i32.const 0x77665544) - ) - ) - - ;; Check that balance has been deducted. - (set_local $balance - (i64.sub (get_local $balance) (i64.load (i32.const 0))) - ) - (call $assert - (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) - ) - ) - - (data (i32.const 0) "\00\80") ;; The value to transfer on instantiation and calls. - ;; Chosen to be greater than existential deposit. - (data (i32.const 8) "\00\11\22\33\44\55\66\77") ;; The input data to instantiations and calls. -) -"#; - #[test] fn deploy_and_call_other_contract() { - let (callee_wasm, callee_code_hash) = compile_module::(CODE_RETURN_WITH_DATA).unwrap(); - let (caller_wasm, caller_code_hash) = compile_module::(CODE_CALLER_CONTRACT).unwrap(); + let (callee_wasm, callee_code_hash) = + compile_module::(&load_wasm("return_with_data.wat")).unwrap(); + let (caller_wasm, caller_code_hash) = + compile_module::(&load_wasm("caller_contract.wat")).unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create @@ -2074,7 +1422,8 @@ fn deploy_and_call_other_contract() { #[test] fn deploy_works_without_gas_price() { - let (wasm, code_hash) = compile_module::(CODE_GET_RUNTIME_STORAGE).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("get_runtime_storage.wat")) + .unwrap(); ExtBuilder::default().existential_deposit(50).gas_price(0).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); @@ -2088,108 +1437,9 @@ fn deploy_works_without_gas_price() { }); } -const CODE_SELF_DESTRUCT: &str = r#" -(module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_address" (func $ext_address)) - (import "env" "ext_balance" (func $ext_balance)) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) - (import "env" "memory" (memory 1 1)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "deploy")) - - (func (export "call") - ;; If the input data is not empty, then recursively call self with empty input data. - ;; This should trap instead of self-destructing since a contract cannot be removed live in - ;; the execution stack cannot be removed. If the recursive call traps, then trap here as - ;; well. - (if (call $ext_scratch_size) - (then - (call $ext_address) - - ;; Expect address to be 8 bytes. - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 8) - ) - ) - - ;; Read own address into memory. 
- (call $ext_scratch_read - (i32.const 16) ;; Pointer to write address to - (i32.const 0) ;; Offset into scratch buffer - (i32.const 8) ;; Length of encoded address - ) - - ;; Recursively call self with empty input data. - (call $assert - (i32.eq - (call $ext_call - (i32.const 16) ;; Pointer to own address - (i32.const 8) ;; Length of own address - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 0) ;; Length of input data buffer - ) - (i32.const 0) - ) - ) - ) - ) - - ;; Send entire remaining balance to the 0 address. - (call $ext_balance) - - ;; Balance should be encoded as a u64. - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 8) - ) - ) - - ;; Read balance into memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer to write balance to - (i32.const 0) ;; Offset into scratch buffer - (i32.const 8) ;; Length of encoded balance - ) - - ;; Self-destruct by sending full balance to the 0 address. - (call $assert - (i32.eq - (call $ext_call - (i32.const 0) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 0) ;; Length of input data buffer - ) - (i32.const 0) - ) - ) - ) -) -"#; - #[test] -fn self_destruct_by_draining_balance() { - let (wasm, code_hash) = compile_module::(CODE_SELF_DESTRUCT).unwrap(); +fn cannot_self_destruct_through_draning() { + let (wasm, code_hash) = compile_module::(&load_wasm("drain.wat")).unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); @@ -2209,23 +1459,25 @@ fn self_destruct_by_draining_balance() { Some(ContractInfo::Alive(_)) ); - // Call BOB with no input data, forcing it to self-destruct. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - 100_000, - vec![], - )); - - // Check that BOB is now dead. - assert!(ContractInfoOf::::get(BOB).is_none()); + // Call BOB with no input data, forcing it to run until out-of-balance + // and eventually trapping because below existential deposit. 
+ assert_err!( + Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + vec![], + ), + "contract trapped during execution" + ); }); } #[test] fn cannot_self_destruct_while_live() { - let (wasm, code_hash) = compile_module::(CODE_SELF_DESTRUCT).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("self_destruct.wat")) + .unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); @@ -2266,163 +1518,57 @@ fn cannot_self_destruct_while_live() { }); } -const CODE_DESTROY_AND_TRANSFER: &str = r#" -(module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_get_storage" (func $ext_get_storage (param i32) (result i32))) - (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) - (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32) (result i32))) - (import "env" "memory" (memory 1 1)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "deploy") - ;; Input data is the code hash of the contract to be deployed. - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 32) - ) - ) - - ;; Copy code hash from scratch buffer into this contract's memory. - (call $ext_scratch_read - (i32.const 48) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 32) ;; Count of bytes to copy. - ) - - ;; Deploy the contract with the provided code hash. - (call $assert - (i32.eq - (call $ext_instantiate - (i32.const 48) ;; Pointer to the code hash. - (i32.const 32) ;; Length of the code hash. - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer. - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 0) ;; Length of input data buffer - ) - (i32.const 0) - ) - ) - - ;; Read the address of the instantiated contract into memory. - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 8) - ) - ) - (call $ext_scratch_read - (i32.const 80) ;; The pointer where to store the scratch buffer contents, - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - - ;; Store the return address. - (call $ext_set_storage - (i32.const 16) ;; Pointer to the key - (i32.const 80) ;; Pointer to the value - (i32.const 8) ;; Length of the value - ) - ) - - (func (export "call") - ;; Read address of destination contract from storage. - (call $assert - (i32.eq - (call $ext_get_storage - (i32.const 16) ;; Pointer to the key - ) - (i32.const 0) - ) - ) - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 8) - ) - ) - (call $ext_scratch_read - (i32.const 80) ;; The pointer where to store the contract address. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 8) ;; Count of bytes to copy. - ) - - ;; Calling the destination contract with non-empty input data should fail. 
- (call $assert - (i32.eq - (call $ext_call - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 1) ;; Length of input data buffer - ) - (i32.const 0x0100) - ) - ) - - ;; Call the destination contract regularly, forcing it to self-destruct. - (call $assert - (i32.eq - (call $ext_call - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 0) ;; Length of input data buffer - ) - (i32.const 0) - ) - ) - - ;; Calling the destination address with non-empty input data should now work since the - ;; contract has been removed. Also transfer a balance to the address so we can ensure this - ;; does not keep the contract alive. - (call $assert - (i32.eq - (call $ext_call - (i32.const 80) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 0) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 1) ;; Length of input data buffer - ) - (i32.const 0) - ) - ) - ) - - (data (i32.const 0) "\00\00\01") ;; Endowment to send when creating contract. - (data (i32.const 8) "") ;; Value to send when calling contract. - (data (i32.const 16) "") ;; The key to store the contract address under. -) -"#; +#[test] +fn self_destruct_works() { + let (wasm, code_hash) = compile_module::(&load_wasm("self_destruct.wat")) + .unwrap(); + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100_000, + 100_000, + code_hash.into(), + vec![], + )); + + // Check that the BOB contract has been instantiated. + assert_matches!( + ContractInfoOf::::get(BOB), + Some(ContractInfo::Alive(_)) + ); + + // Call BOB without input data which triggers termination. + assert_matches!( + Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + vec![], + ), + Ok(()) + ); + + // Check that account is gone + assert!(ContractInfoOf::::get(BOB).is_none()); + + // check that the beneficiary (django) got remaining balance + assert_eq!(Balances::free_balance(DJANGO), 100_000); + }); +} // This tests that one contract cannot prevent another from self-destructing by sending it // additional funds after it has been drained. 
#[test] fn destroy_contract_and_transfer_funds() { - let (callee_wasm, callee_code_hash) = compile_module::(CODE_SELF_DESTRUCT).unwrap(); - let (caller_wasm, caller_code_hash) = compile_module::(CODE_DESTROY_AND_TRANSFER).unwrap(); + let (callee_wasm, callee_code_hash) = + compile_module::(&load_wasm("self_destruct.wat")).unwrap(); + let (caller_wasm, caller_code_hash) = + compile_module::(&load_wasm("destroy_and_transfer.wat")).unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { // Create @@ -2460,72 +1606,16 @@ fn destroy_contract_and_transfer_funds() { }); } -const CODE_SELF_DESTRUCTING_CONSTRUCTOR: &str = r#" -(module - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_balance" (func $ext_balance)) - (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) - (import "env" "memory" (memory 1 1)) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func (export "deploy") - ;; Send entire remaining balance to the 0 address. - (call $ext_balance) - - ;; Balance should be encoded as a u64. - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 8) - ) - ) - - ;; Read balance into memory. - (call $ext_scratch_read - (i32.const 8) ;; Pointer to write balance to - (i32.const 0) ;; Offset into scratch buffer - (i32.const 8) ;; Length of encoded balance - ) - - ;; Self-destruct by sending full balance to the 0 address. - (call $assert - (i32.eq - (call $ext_call - (i32.const 0) ;; Pointer to destination address - (i32.const 8) ;; Length of destination address - (i64.const 0) ;; How much gas to devote for the execution. 0 = all. - (i32.const 8) ;; Pointer to the buffer with value to transfer - (i32.const 8) ;; Length of the buffer with value to transfer - (i32.const 0) ;; Pointer to input data buffer address - (i32.const 0) ;; Length of input data buffer - ) - (i32.const 0) - ) - ) - ) - - (func (export "call")) -) -"#; - #[test] fn cannot_self_destruct_in_constructor() { - let (wasm, code_hash) = compile_module::(CODE_SELF_DESTRUCTING_CONSTRUCTOR).unwrap(); + let (wasm, code_hash) = + compile_module::(&load_wasm("self_destructing_constructor.wat")).unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - // Fail to instantiate the BOB contract since its final balance is below existential - // deposit. + // Fail to instantiate the BOB because the call that is issued in the deploy + // function exhausts all balances which puts it below the existential deposit. 
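+		// (`ext_call` refuses a transfer that would bring the calling contract below the
+		// existential deposit, and the contract's own assertion on the returned code then
+		// traps.)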
assert_err!( Contracts::instantiate( Origin::signed(ALICE), @@ -2534,7 +1624,7 @@ fn cannot_self_destruct_in_constructor() { code_hash.into(), vec![], ), - "insufficient remaining balance" + "contract trapped during execution" ); }); } @@ -2547,94 +1637,18 @@ fn check_block_gas_limit_works() { let call: Call = crate::Call::put_code(1000, vec![]).into(); assert_eq!( - check.validate(&0, &call, info, 0), InvalidTransaction::ExhaustsResources.into(), + check.validate(&0, &call, &info, 0), InvalidTransaction::ExhaustsResources.into(), ); let call: Call = crate::Call::update_schedule(Default::default()).into(); - assert_eq!(check.validate(&0, &call, info, 0), Ok(Default::default())); + assert_eq!(check.validate(&0, &call, &info, 0), Ok(Default::default())); }); } -const CODE_GET_RUNTIME_STORAGE: &str = r#" -(module - (import "env" "ext_get_runtime_storage" - (func $ext_get_runtime_storage (param i32 i32) (result i32)) - ) - (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) - (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) - (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) - (import "env" "memory" (memory 1 1)) - - (func (export "deploy")) - - (func $assert (param i32) - (block $ok - (br_if $ok - (get_local 0) - ) - (unreachable) - ) - ) - - (func $call (export "call") - ;; Load runtime storage for the first key and assert that it exists. - (call $assert - (i32.eq - (call $ext_get_runtime_storage - (i32.const 16) - (i32.const 4) - ) - (i32.const 0) - ) - ) - - ;; assert $ext_scratch_size == 4 - (call $assert - (i32.eq - (call $ext_scratch_size) - (i32.const 4) - ) - ) - - ;; copy contents of the scratch buffer into the contract's memory. - (call $ext_scratch_read - (i32.const 4) ;; Pointer in memory to the place where to copy. - (i32.const 0) ;; Offset from the start of the scratch buffer. - (i32.const 4) ;; Count of bytes to copy. - ) - - ;; assert that contents of the buffer is equal to the i32 value of 0x14144020. - (call $assert - (i32.eq - (i32.load - (i32.const 4) - ) - (i32.const 0x14144020) - ) - ) - - ;; Load the second key and assert that it doesn't exist. - (call $assert - (i32.eq - (call $ext_get_runtime_storage - (i32.const 20) - (i32.const 4) - ) - (i32.const 1) - ) - ) - ) - - ;; The first key, 4 bytes long. - (data (i32.const 16) "\01\02\03\04") - ;; The second key, 4 bytes long. - (data (i32.const 20) "\02\03\04\05") -) -"#; - #[test] fn get_runtime_storage() { - let (wasm, code_hash) = compile_module::(CODE_GET_RUNTIME_STORAGE).unwrap(); + let (wasm, code_hash) = compile_module::(&load_wasm("get_runtime_storage.wat")) + .unwrap(); ExtBuilder::default().existential_deposit(50).build().execute_with(|| { Balances::deposit_creating(&ALICE, 1_000_000); @@ -2660,3 +1674,59 @@ fn get_runtime_storage() { )); }); } + +#[test] +fn crypto_hashes() { + let (wasm, code_hash) = compile_module::(&load_wasm("crypto_hashes.wat")).unwrap(); + + ExtBuilder::default().existential_deposit(50).build().execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Instantiate the CRYPTO_HASHES contract. + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100_000, + 100_000, + code_hash.into(), + vec![], + )); + // Perform the call. + let input = b"_DEAD_BEEF"; + use sp_io::hashing::*; + // Wraps a hash function into a more dynamic form usable for testing. + macro_rules! 
dyn_hash_fn { + ($name:ident) => { + Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) + }; + } + // All hash functions and their associated output byte lengths. + let test_cases: &[(Box Box<[u8]>>, usize)] = &[ + (dyn_hash_fn!(sha2_256), 32), + (dyn_hash_fn!(keccak_256), 32), + (dyn_hash_fn!(blake2_256), 32), + (dyn_hash_fn!(blake2_128), 16), + ]; + // Test the given hash functions for the input: "_DEAD_BEEF" + for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { + // We offset data in the contract tables by 1. + let mut params = vec![(n + 1) as u8]; + params.extend_from_slice(input); + let result = >::bare_call( + ALICE, + BOB, + 0, + 100_000, + params, + ).unwrap(); + assert_eq!(result.status, 0); + let expected = hash_fn(input.as_ref()); + assert_eq!(&result.data[..*expected_size], &*expected); + } + }) +} + +fn load_wasm(file_name: &str) -> String { + let path = ["tests/", file_name].concat(); + std::fs::read_to_string(&path).expect(&format!("Unable to read {} file", path)) +} diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 1b0401a5626e6d80ec18af492943f01dd5f9de6b..8911fb72b6130c66fa1cf7d88d6fcc86b062b5dd 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -182,6 +182,12 @@ mod tests { gas_left: u64, } + #[derive(Debug, PartialEq, Eq)] + struct TerminationEntry { + beneficiary: u64, + gas_left: u64, + } + #[derive(Debug, PartialEq, Eq)] struct TransferEntry { to: u64, @@ -195,6 +201,7 @@ mod tests { storage: HashMap>, rent_allowance: u64, instantiates: Vec, + terminations: Vec, transfers: Vec, dispatches: Vec, restores: Vec, @@ -242,7 +249,13 @@ mod tests { let address = self.next_account_id; self.next_account_id += 1; - Ok((address, ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() })) + Ok(( + address, + ExecReturnValue { + status: STATUS_SUCCESS, + data: Vec::new(), + }, + )) } fn transfer( &mut self, @@ -275,6 +288,17 @@ mod tests { // TODO: Add tests for different call outcomes. Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }) } + fn terminate( + &mut self, + beneficiary: &u64, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + self.terminations.push(TerminationEntry { + beneficiary: *beneficiary, + gas_left: gas_meter.gas_left(), + }); + Ok(()) + } fn note_dispatch_call(&mut self, call: Call) { self.dispatches.push(DispatchEntry(call)); } @@ -379,6 +403,13 @@ mod tests { ) -> Result<(), DispatchError> { (**self).transfer(to, value, gas_meter) } + fn terminate( + &mut self, + beneficiary: &u64, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + (**self).terminate(beneficiary, gas_meter) + } fn call( &mut self, to: &u64, @@ -649,6 +680,47 @@ mod tests { ); } + const CODE_TERMINATE: &str = r#" +(module + ;; ext_terminate( + ;; beneficiary_ptr: u32, + ;; beneficiary_len: u32, + ;; ) + (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) + (import "env" "memory" (memory 1 1)) + (func (export "call") + (call $ext_terminate + (i32.const 4) ;; Pointer to "beneficiary" address. + (i32.const 8) ;; Length of "beneficiary" address. + ) + ) + (func (export "deploy")) + + ;; Beneficiary AccountId to transfer the funds. + ;; Represented by u64 (8 bytes long) in little endian. 
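+	;; Account id 0x09 below is what the `contract_terminate` test asserts as the
+	;; termination beneficiary.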
+ (data (i32.const 4) "\09\00\00\00\00\00\00\00") +) +"#; + + #[test] + fn contract_terminate() { + let mut mock_ext = MockExt::default(); + execute( + CODE_TERMINATE, + vec![], + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ).unwrap(); + + assert_eq!( + &mock_ext.terminations, + &[TerminationEntry { + beneficiary: 0x09, + gas_left: 49989, + }] + ); + } + const CODE_TRANSFER_LIMITED_GAS: &str = r#" (module ;; ext_call( diff --git a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 24609446873140a1d5dec4b29ec7bdf712a9e557..7cede5542fc6f8bde389f6b99fc960b7b3512e24 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -26,6 +26,12 @@ use frame_system; use sp_std::{prelude::*, mem, convert::TryInto}; use codec::{Decode, Encode}; use sp_runtime::traits::{Bounded, SaturatedConversion}; +use sp_io::hashing::{ + keccak_256, + blake2_256, + blake2_128, + sha2_256, +}; /// The value returned from ext_call and ext_instantiate contract external functions if the call or /// instantiation traps. This value is chosen as if the execution does not trap, the return value @@ -41,6 +47,9 @@ enum SpecialTrap { Return(Vec), /// Signals that trap was generated because the contract exhausted its gas limit. OutOfGas, + /// Signals that a trap was generated in response to a succesful call to the + /// `ext_terminate` host function. + Termination, } /// Can only be used for one call. @@ -83,14 +92,20 @@ pub(crate) fn to_execution_result( status: STATUS_SUCCESS, data, }) - } + }, + Some(SpecialTrap::Termination) => { + return Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data: Vec::new(), + }) + }, Some(SpecialTrap::OutOfGas) => { return Err(ExecError { reason: "ran out of gas during contract execution".into(), buffer: runtime.scratch_buf, }) - } - _ => (), + }, + None => (), } // Check the exact type of the error. @@ -443,6 +458,9 @@ define_env!(Env, , // changes made by the called contract are reverted. The scratch buffer is filled with the // output data returned by the called contract, even in the case of a failure status. // + // This call fails if it would bring the calling contract below the existential deposit. + // In order to destroy a contract `ext_terminate` must be used. + // // If the contract traps during execution or otherwise fails to complete successfully, then // this function clears the scratch buffer and returns 0x0100. As with a failure status, any // state changes made by the called contract are reverted. @@ -523,6 +541,9 @@ define_env!(Env, , // of the newly instantiated contract. In the case of a failure status, the scratch buffer is // cleared. // + // This call fails if it would bring the calling contract below the existential deposit. + // In order to destroy a contract `ext_terminate` must be used. + // // If the contract traps during execution or otherwise fails to complete successfully, then // this function clears the scratch buffer and returns 0x0100. As with a failure status, any // state changes made by the called contract are reverted. @@ -601,6 +622,30 @@ define_env!(Env, , } }, + // Remove the calling account and transfer remaining balance. + // + // This function never returns. Either the termination was successful and the + // execution of the destroyed contract is halted. Or it failed during the termination + // which is considered fatal and results in a trap + rollback. 
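+	// A successful termination is reported to the caller of the terminated contract via
+	// `SpecialTrap::Termination`; the host function itself always traps.
+	//
+	// An illustrative invocation from contract code (mirroring the `CODE_TERMINATE`
+	// fixture in `wasm/mod.rs`) looks like:
+	//
+	//   (call $ext_terminate
+	//     (i32.const 4) ;; Pointer to the encoded beneficiary AccountId.
+	//     (i32.const 8) ;; Length of the encoded AccountId.
+	//   )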
+ // + // - beneficiary_ptr: a pointer to the address of the beneficiary account where all + // where all remaining funds of the caller are transfered. + // Should be decodable as an `T::AccountId`. Traps otherwise. + // - beneficiary_len: length of the address buffer. + ext_terminate( + ctx, + beneficiary_ptr: u32, + beneficiary_len: u32 + ) => { + let beneficiary: <::T as frame_system::Trait>::AccountId = + read_sandbox_memory_as(ctx, beneficiary_ptr, beneficiary_len)?; + + if let Ok(_) = ctx.ext.terminate(&beneficiary, ctx.gas_meter) { + ctx.special_trap = Some(SpecialTrap::Termination); + } + Err(sp_sandbox::HostError) + }, + // Save a data buffer as a result of the execution, terminate the execution and return a // successful result to the caller. // @@ -974,8 +1019,145 @@ define_env!(Env, , } } }, + + // Computes the SHA2 256-bit hash on the given input buffer. + // + // Returns the result directly into the given output buffer. + // + // # Note + // + // - The `input` and `output` buffer may overlap. + // - The output buffer is expected to hold at least 32 bytes (256 bits). + // - It is the callers responsibility to provide an output buffer that + // is large enough to hold the expected amount of bytes returned by the + // chosen hash function. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input + // data is placed. + // - `input_len`: the length of the input data in bytes. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The function will write the result + // directly into this buffer. + ext_hash_sha2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + compute_hash_on_intermediate_buffer(ctx, sha2_256, input_ptr, input_len, output_ptr) + }, + + // Computes the KECCAK 256-bit hash on the given input buffer. + // + // Returns the result directly into the given output buffer. + // + // # Note + // + // - The `input` and `output` buffer may overlap. + // - The output buffer is expected to hold at least 32 bytes (256 bits). + // - It is the callers responsibility to provide an output buffer that + // is large enough to hold the expected amount of bytes returned by the + // chosen hash function. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input + // data is placed. + // - `input_len`: the length of the input data in bytes. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The function will write the result + // directly into this buffer. + ext_hash_keccak_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + compute_hash_on_intermediate_buffer(ctx, keccak_256, input_ptr, input_len, output_ptr) + }, + + // Computes the BLAKE2 256-bit hash on the given input buffer. + // + // Returns the result directly into the given output buffer. + // + // # Note + // + // - The `input` and `output` buffer may overlap. + // - The output buffer is expected to hold at least 32 bytes (256 bits). + // - It is the callers responsibility to provide an output buffer that + // is large enough to hold the expected amount of bytes returned by the + // chosen hash function. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input + // data is placed. + // - `input_len`: the length of the input data in bytes. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The function will write the result + // directly into this buffer. 
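+	// All four `ext_hash_*` host functions delegate to the same helper; see
+	// `compute_hash_on_intermediate_buffer` below.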
+ ext_hash_blake2_256(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + compute_hash_on_intermediate_buffer(ctx, blake2_256, input_ptr, input_len, output_ptr) + }, + + // Computes the BLAKE2 128-bit hash on the given input buffer. + // + // Returns the result directly into the given output buffer. + // + // # Note + // + // - The `input` and `output` buffer may overlap. + // - The output buffer is expected to hold at least 16 bytes (128 bits). + // - It is the callers responsibility to provide an output buffer that + // is large enough to hold the expected amount of bytes returned by the + // chosen hash function. + // + // # Parameters + // + // - `input_ptr`: the pointer into the linear memory where the input + // data is placed. + // - `input_len`: the length of the input data in bytes. + // - `output_ptr`: the pointer into the linear memory where the output + // data is placed. The function will write the result + // directly into this buffer. + ext_hash_blake2_128(ctx, input_ptr: u32, input_len: u32, output_ptr: u32) => { + compute_hash_on_intermediate_buffer(ctx, blake2_128, input_ptr, input_len, output_ptr) + }, ); +/// Computes the given hash function on the scratch buffer. +/// +/// Reads from the sandboxed input buffer into an intermediate buffer. +/// Returns the result directly to the output buffer of the sandboxed memory. +/// +/// It is the callers responsibility to provide an output buffer that +/// is large enough to hold the expected amount of bytes returned by the +/// chosen hash function. +/// +/// # Note +/// +/// The `input` and `output` buffers may overlap. +fn compute_hash_on_intermediate_buffer( + ctx: &mut Runtime, + hash_fn: F, + input_ptr: u32, + input_len: u32, + output_ptr: u32, +) -> Result<(), sp_sandbox::HostError> +where + E: Ext, + F: FnOnce(&[u8]) -> R, + R: AsRef<[u8]>, +{ + // Copy the input buffer directly into the scratch buffer to avoid + // heap allocations. + let input = read_sandbox_memory(ctx, input_ptr, input_len)?; + // Compute the hash on the scratch buffer using the given hash function. + let hash = hash_fn(&input); + // Write the resulting hash back into the sandboxed output buffer. + write_sandbox_memory( + ctx.schedule, + &mut ctx.special_trap, + ctx.gas_meter, + &ctx.memory, + output_ptr, + hash.as_ref(), + )?; + Ok(()) +} + /// Finds duplicates in a given vector. 
/// /// This function has complexity of O(n log n) and no additional memory is required, although diff --git a/frame/contracts/tests/caller_contract.wat b/frame/contracts/tests/caller_contract.wat new file mode 100644 index 0000000000000000000000000000000000000000..4bc122c0b1863002ffc9cb0b4d8042231c2dd576 --- /dev/null +++ b/frame/contracts/tests/caller_contract.wat @@ -0,0 +1,275 @@ +(module + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_balance" (func $ext_balance)) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_println" (func $ext_println (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func $current_balance (param $sp i32) (result i64) + (call $ext_balance) + (call $assert + (i32.eq (call $ext_scratch_size) (i32.const 8)) + ) + (call $ext_scratch_read + (i32.sub (get_local $sp) (i32.const 8)) + (i32.const 0) + (i32.const 8) + ) + (i64.load (i32.sub (get_local $sp) (i32.const 8))) + ) + + (func (export "deploy")) + + (func (export "call") + (local $sp i32) + (local $exit_code i32) + (local $balance i64) + + ;; Input data is the code hash of the contract to be deployed. + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 32) + ) + ) + + ;; Copy code hash from scratch buffer into this contract's memory. + (call $ext_scratch_read + (i32.const 24) ;; The pointer where to store the scratch buffer contents, + (i32.const 0) ;; Offset from the start of the scratch buffer. + (i32.const 32) ;; Count of bytes to copy. + ) + + ;; Read current balance into local variable. + (set_local $sp (i32.const 1024)) + (set_local $balance + (call $current_balance (get_local $sp)) + ) + + ;; Fail to deploy the contract since it returns a non-zero exit status. + (set_local $exit_code + (call $ext_instantiate + (i32.const 24) ;; Pointer to the code hash. + (i32.const 32) ;; Length of the code hash. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 9) ;; Pointer to input data buffer address + (i32.const 7) ;; Length of input data buffer + ) + ) + + ;; Check non-zero exit status. + (call $assert + (i32.eq (get_local $exit_code) (i32.const 0x11)) + ) + + ;; Check that scratch buffer is empty since contract instantiation failed. + (call $assert + (i32.eq (call $ext_scratch_size) (i32.const 0)) + ) + + ;; Check that balance has not changed. + (call $assert + (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) + ) + + ;; Fail to deploy the contract due to insufficient gas. + (set_local $exit_code + (call $ext_instantiate + (i32.const 24) ;; Pointer to the code hash. + (i32.const 32) ;; Length of the code hash. + (i64.const 200) ;; How much gas to devote for the execution. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 8) ;; Pointer to input data buffer address + (i32.const 8) ;; Length of input data buffer + ) + ) + + ;; Check for special trap exit status. 
+ (call $assert + (i32.eq (get_local $exit_code) (i32.const 0x0100)) + ) + + ;; Check that scratch buffer is empty since contract instantiation failed. + (call $assert + (i32.eq (call $ext_scratch_size) (i32.const 0)) + ) + + ;; Check that balance has not changed. + (call $assert + (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) + ) + + ;; Deploy the contract successfully. + (set_local $exit_code + (call $ext_instantiate + (i32.const 24) ;; Pointer to the code hash. + (i32.const 32) ;; Length of the code hash. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 8) ;; Pointer to input data buffer address + (i32.const 8) ;; Length of input data buffer + ) + ) + + ;; Check for success exit status. + (call $assert + (i32.eq (get_local $exit_code) (i32.const 0x00)) + ) + + ;; Check that scratch buffer contains the address of the new contract. + (call $assert + (i32.eq (call $ext_scratch_size) (i32.const 8)) + ) + + ;; Copy contract address from scratch buffer into this contract's memory. + (call $ext_scratch_read + (i32.const 16) ;; The pointer where to store the scratch buffer contents, + (i32.const 0) ;; Offset from the start of the scratch buffer. + (i32.const 8) ;; Count of bytes to copy. + ) + + ;; Check that balance has been deducted. + (set_local $balance + (i64.sub (get_local $balance) (i64.load (i32.const 0))) + ) + (call $assert + (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) + ) + + ;; Call the new contract and expect it to return failing exit code. + (set_local $exit_code + (call $ext_call + (i32.const 16) ;; Pointer to "callee" address. + (i32.const 8) ;; Length of "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 9) ;; Pointer to input data buffer address + (i32.const 7) ;; Length of input data buffer + ) + ) + + ;; Check non-zero exit status. + (call $assert + (i32.eq (get_local $exit_code) (i32.const 0x11)) + ) + + ;; Check that scratch buffer contains the expected return data. + (call $assert + (i32.eq (call $ext_scratch_size) (i32.const 3)) + ) + (i32.store + (i32.sub (get_local $sp) (i32.const 4)) + (i32.const 0) + ) + (call $ext_scratch_read + (i32.sub (get_local $sp) (i32.const 4)) + (i32.const 0) + (i32.const 3) + ) + (call $assert + (i32.eq + (i32.load (i32.sub (get_local $sp) (i32.const 4))) + (i32.const 0x00776655) + ) + ) + + ;; Check that balance has not changed. + (call $assert + (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) + ) + + ;; Fail to call the contract due to insufficient gas. + (set_local $exit_code + (call $ext_call + (i32.const 16) ;; Pointer to "callee" address. + (i32.const 8) ;; Length of "callee" address. + (i64.const 100) ;; How much gas to devote for the execution. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 8) ;; Pointer to input data buffer address + (i32.const 8) ;; Length of input data buffer + ) + ) + + ;; Check for special trap exit status. + (call $assert + (i32.eq (get_local $exit_code) (i32.const 0x0100)) + ) + + ;; Check that scratch buffer is empty since call trapped. 
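+		;; (A trapped call leaves nothing in the scratch buffer and reverts any state
+		;; changes made by the callee.)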
+ (call $assert + (i32.eq (call $ext_scratch_size) (i32.const 0)) + ) + + ;; Check that balance has not changed. + (call $assert + (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) + ) + + ;; Call the contract successfully. + (set_local $exit_code + (call $ext_call + (i32.const 16) ;; Pointer to "callee" address. + (i32.const 8) ;; Length of "callee" address. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 8) ;; Pointer to input data buffer address + (i32.const 8) ;; Length of input data buffer + ) + ) + + ;; Check for success exit status. + (call $assert + (i32.eq (get_local $exit_code) (i32.const 0x00)) + ) + + ;; Check that scratch buffer contains the expected return data. + (call $assert + (i32.eq (call $ext_scratch_size) (i32.const 4)) + ) + (i32.store + (i32.sub (get_local $sp) (i32.const 4)) + (i32.const 0) + ) + (call $ext_scratch_read + (i32.sub (get_local $sp) (i32.const 4)) + (i32.const 0) + (i32.const 4) + ) + (call $assert + (i32.eq + (i32.load (i32.sub (get_local $sp) (i32.const 4))) + (i32.const 0x77665544) + ) + ) + + ;; Check that balance has been deducted. + (set_local $balance + (i64.sub (get_local $balance) (i64.load (i32.const 0))) + ) + (call $assert + (i64.eq (get_local $balance) (call $current_balance (get_local $sp))) + ) + ) + + (data (i32.const 0) "\00\80") ;; The value to transfer on instantiation and calls. + ;; Chosen to be greater than existential deposit. + (data (i32.const 8) "\00\11\22\33\44\55\66\77") ;; The input data to instantiations and calls. +) diff --git a/frame/contracts/tests/check_default_rent_allowance.wat b/frame/contracts/tests/check_default_rent_allowance.wat new file mode 100644 index 0000000000000000000000000000000000000000..12b3004adf7dea2c7db532372b0a7f9c867c6d3f --- /dev/null +++ b/frame/contracts/tests/check_default_rent_allowance.wat @@ -0,0 +1,47 @@ +(module + (import "env" "ext_rent_allowance" (func $ext_rent_allowance)) + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call")) + + (func (export "deploy") + ;; fill the scratch buffer with the rent allowance. + (call $ext_rent_allowance) + + ;; assert $ext_scratch_size == 8 + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 8) + ) + ) + + ;; copy contents of the scratch buffer into the contract's memory. + (call $ext_scratch_read + (i32.const 8) ;; Pointer in memory to the place where to copy. + (i32.const 0) ;; Offset from the start of the scratch buffer. + (i32.const 8) ;; Count of bytes to copy. + ) + + ;; assert that contents of the buffer is equal to >::max_value(). 
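+	;; (0xFFFFFFFFFFFFFFFF is the maximum u64 value, i.e. the default allowance of a
+	;; freshly instantiated contract.)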
+ (call $assert + (i64.eq + (i64.load + (i32.const 8) + ) + (i64.const 0xFFFFFFFFFFFFFFFF) + ) + ) + ) +) diff --git a/frame/contracts/tests/crypto_hashes.wat b/frame/contracts/tests/crypto_hashes.wat new file mode 100644 index 0000000000000000000000000000000000000000..6dbca33928cb791bb32c2c6c857d687e74271b1e --- /dev/null +++ b/frame/contracts/tests/crypto_hashes.wat @@ -0,0 +1,80 @@ +(module + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) + + (import "env" "ext_hash_sha2_256" (func $ext_hash_sha2_256 (param i32 i32 i32))) + (import "env" "ext_hash_keccak_256" (func $ext_hash_keccak_256 (param i32 i32 i32))) + (import "env" "ext_hash_blake2_256" (func $ext_hash_blake2_256 (param i32 i32 i32))) + (import "env" "ext_hash_blake2_128" (func $ext_hash_blake2_128 (param i32 i32 i32))) + + (import "env" "memory" (memory 1 1)) + + (type $hash_fn_sig (func (param i32 i32 i32))) + (table 8 funcref) + (elem (i32.const 1) + $ext_hash_sha2_256 + $ext_hash_keccak_256 + $ext_hash_blake2_256 + $ext_hash_blake2_128 + ) + (data (i32.const 1) "20202010201008") ;; Output sizes of the hashes in order in hex. + + ;; Not in use by the tests besides instantiating the contract. + (func (export "deploy")) + + ;; Called by the tests. + ;; + ;; The `call` function expects data in a certain format in the scratch + ;; buffer. + ;; + ;; 1. The first byte encodes an identifier for the crypto hash function + ;; under test. (*) + ;; 2. The rest encodes the input data that is directly fed into the + ;; crypto hash function chosen in 1. + ;; + ;; The `deploy` function then computes the chosen crypto hash function + ;; given the input and puts the result back into the scratch buffer. + ;; After contract execution the test driver then asserts that the returned + ;; values are equal to the expected bytes for the input and chosen hash + ;; function. + ;; + ;; (*) The possible value for the crypto hash identifiers can be found below: + ;; + ;; | value | Algorithm | Bit Width | + ;; |-------|-----------|-----------| + ;; | 0 | SHA2 | 256 | + ;; | 1 | KECCAK | 256 | + ;; | 2 | BLAKE2 | 256 | + ;; | 3 | BLAKE2 | 128 | + ;; --------------------------------- + (func (export "call") (result i32) + (local $chosen_hash_fn i32) + (local $input_ptr i32) + (local $input_len i32) + (local $output_ptr i32) + (local $output_len i32) + (local.set $input_ptr (i32.const 10)) + (call $ext_scratch_read (local.get $input_ptr) (i32.const 0) (call $ext_scratch_size)) + (local.set $chosen_hash_fn (i32.load8_u (local.get $input_ptr))) + (if (i32.gt_u (local.get $chosen_hash_fn) (i32.const 7)) + ;; We check that the chosen hash fn identifier is within bounds: [0,7] + (unreachable) + ) + (local.set $input_ptr (i32.add (local.get $input_ptr) (i32.const 1))) + (local.set $input_len (i32.sub (call $ext_scratch_size) (i32.const 1))) + (local.set $output_ptr (i32.const 100)) + (local.set $output_len (i32.load8_u (local.get $chosen_hash_fn))) + (call_indirect (type $hash_fn_sig) + (local.get $input_ptr) + (local.get $input_len) + (local.get $output_ptr) + (local.get $chosen_hash_fn) ;; Which crypto hash function to execute. + ) + (call $ext_scratch_write + (local.get $output_ptr) ;; Linear memory location of the output buffer. + (local.get $output_len) ;; Number of output buffer bytes. 
+ ) + (i32.const 0) + ) +) diff --git a/frame/contracts/tests/destroy_and_transfer.wat b/frame/contracts/tests/destroy_and_transfer.wat new file mode 100644 index 0000000000000000000000000000000000000000..c8cf7271d74193bccf3d8720c34c12b43471bd45 --- /dev/null +++ b/frame/contracts/tests/destroy_and_transfer.wat @@ -0,0 +1,148 @@ +(module + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_get_storage" (func $ext_get_storage (param i32) (result i32))) + (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_instantiate" (func $ext_instantiate (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "deploy") + ;; Input data is the code hash of the contract to be deployed. + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 32) + ) + ) + + ;; Copy code hash from scratch buffer into this contract's memory. + (call $ext_scratch_read + (i32.const 48) ;; The pointer where to store the scratch buffer contents, + (i32.const 0) ;; Offset from the start of the scratch buffer. + (i32.const 32) ;; Count of bytes to copy. + ) + + ;; Deploy the contract with the provided code hash. + (call $assert + (i32.eq + (call $ext_instantiate + (i32.const 48) ;; Pointer to the code hash. + (i32.const 32) ;; Length of the code hash. + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer. + (i32.const 0) ;; Pointer to input data buffer address + (i32.const 0) ;; Length of input data buffer + ) + (i32.const 0) + ) + ) + + ;; Read the address of the instantiated contract into memory. + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 8) + ) + ) + (call $ext_scratch_read + (i32.const 80) ;; The pointer where to store the scratch buffer contents, + (i32.const 0) ;; Offset from the start of the scratch buffer. + (i32.const 8) ;; Count of bytes to copy. + ) + + ;; Store the return address. + (call $ext_set_storage + (i32.const 16) ;; Pointer to the key + (i32.const 80) ;; Pointer to the value + (i32.const 8) ;; Length of the value + ) + ) + + (func (export "call") + ;; Read address of destination contract from storage. + (call $assert + (i32.eq + (call $ext_get_storage + (i32.const 16) ;; Pointer to the key + ) + (i32.const 0) + ) + ) + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 8) + ) + ) + (call $ext_scratch_read + (i32.const 80) ;; The pointer where to store the contract address. + (i32.const 0) ;; Offset from the start of the scratch buffer. + (i32.const 8) ;; Count of bytes to copy. + ) + + ;; Calling the destination contract with non-empty input data should fail. + (call $assert + (i32.eq + (call $ext_call + (i32.const 80) ;; Pointer to destination address + (i32.const 8) ;; Length of destination address + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. 
+ (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer + (i32.const 0) ;; Pointer to input data buffer address + (i32.const 1) ;; Length of input data buffer + ) + (i32.const 0x0100) + ) + ) + + ;; Call the destination contract regularly, forcing it to self-destruct. + (call $assert + (i32.eq + (call $ext_call + (i32.const 80) ;; Pointer to destination address + (i32.const 8) ;; Length of destination address + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer + (i32.const 0) ;; Pointer to input data buffer address + (i32.const 0) ;; Length of input data buffer + ) + (i32.const 0) + ) + ) + + ;; Calling the destination address with non-empty input data should now work since the + ;; contract has been removed. Also transfer a balance to the address so we can ensure this + ;; does not keep the contract alive. + (call $assert + (i32.eq + (call $ext_call + (i32.const 80) ;; Pointer to destination address + (i32.const 8) ;; Length of destination address + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 0) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer + (i32.const 0) ;; Pointer to input data buffer address + (i32.const 1) ;; Length of input data buffer + ) + (i32.const 0) + ) + ) + ) + + (data (i32.const 0) "\00\00\01") ;; Endowment to send when creating contract. + (data (i32.const 8) "") ;; Value to send when calling contract. + (data (i32.const 16) "") ;; The key to store the contract address under. +) diff --git a/frame/contracts/tests/dispatch_call.wat b/frame/contracts/tests/dispatch_call.wat new file mode 100644 index 0000000000000000000000000000000000000000..db0995bd6c79ad37a8b71b4db9d88a8682b308e8 --- /dev/null +++ b/frame/contracts/tests/dispatch_call.wat @@ -0,0 +1,14 @@ +(module + (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func (export "call") + (call $ext_dispatch_call + (i32.const 8) ;; Pointer to the start of encoded call buffer + (i32.const 11) ;; Length of the buffer + ) + ) + (func (export "deploy")) + + (data (i32.const 8) "\00\00\03\00\00\00\00\00\00\00\C8") +) diff --git a/frame/contracts/tests/dispatch_call_then_trap.wat b/frame/contracts/tests/dispatch_call_then_trap.wat new file mode 100644 index 0000000000000000000000000000000000000000..ce949d68236f39846f82434ac45c9b825cedd698 --- /dev/null +++ b/frame/contracts/tests/dispatch_call_then_trap.wat @@ -0,0 +1,15 @@ +(module + (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func (export "call") + (call $ext_dispatch_call + (i32.const 8) ;; Pointer to the start of encoded call buffer + (i32.const 11) ;; Length of the buffer + ) + (unreachable) ;; trap so that the top level transaction fails + ) + (func (export "deploy")) + + (data (i32.const 8) "\00\00\03\00\00\00\00\00\00\00\C8") +) diff --git a/frame/contracts/tests/drain.wat b/frame/contracts/tests/drain.wat new file mode 100644 index 0000000000000000000000000000000000000000..d08e1dd0d2981eb926478c081e8125a1ab3f6cbc --- /dev/null +++ b/frame/contracts/tests/drain.wat @@ -0,0 +1,54 @@ +(module + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func 
$ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_balance" (func $ext_balance)) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "deploy")) + + (func (export "call") + ;; Send entire remaining balance to the 0 address. + (call $ext_balance) + + ;; Balance should be encoded as a u64. + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 8) + ) + ) + + ;; Read balance into memory. + (call $ext_scratch_read + (i32.const 8) ;; Pointer to write balance to + (i32.const 0) ;; Offset into scratch buffer + (i32.const 8) ;; Length of encoded balance + ) + + ;; Self-destruct by sending full balance to the 0 address. + (call $assert + (i32.eq + (call $ext_call + (i32.const 0) ;; Pointer to destination address + (i32.const 8) ;; Length of destination address + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer + (i32.const 0) ;; Pointer to input data buffer address + (i32.const 0) ;; Length of input data buffer + ) + (i32.const 0) + ) + ) + ) +) diff --git a/frame/contracts/tests/get_runtime_storage.wat b/frame/contracts/tests/get_runtime_storage.wat new file mode 100644 index 0000000000000000000000000000000000000000..6148f1c408c017c5096f055759d1ed66ba9b7104 --- /dev/null +++ b/frame/contracts/tests/get_runtime_storage.wat @@ -0,0 +1,74 @@ +(module + (import "env" "ext_get_runtime_storage" + (func $ext_get_runtime_storage (param i32 i32) (result i32)) + ) + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func (export "deploy")) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func $call (export "call") + ;; Load runtime storage for the first key and assert that it exists. + (call $assert + (i32.eq + (call $ext_get_runtime_storage + (i32.const 16) + (i32.const 4) + ) + (i32.const 0) + ) + ) + + ;; assert $ext_scratch_size == 4 + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 4) + ) + ) + + ;; copy contents of the scratch buffer into the contract's memory. + (call $ext_scratch_read + (i32.const 4) ;; Pointer in memory to the place where to copy. + (i32.const 0) ;; Offset from the start of the scratch buffer. + (i32.const 4) ;; Count of bytes to copy. + ) + + ;; assert that contents of the buffer is equal to the i32 value of 0x14144020. + (call $assert + (i32.eq + (i32.load + (i32.const 4) + ) + (i32.const 0x14144020) + ) + ) + + ;; Load the second key and assert that it doesn't exist. + (call $assert + (i32.eq + (call $ext_get_runtime_storage + (i32.const 20) + (i32.const 4) + ) + (i32.const 1) + ) + ) + ) + + ;; The first key, 4 bytes long. + (data (i32.const 16) "\01\02\03\04") + ;; The second key, 4 bytes long. 
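+	;; (The contract expects this key to be absent at runtime, in contrast to the first one.)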
+ (data (i32.const 20) "\02\03\04\05") +) diff --git a/frame/contracts/tests/restoration.wat b/frame/contracts/tests/restoration.wat new file mode 100644 index 0000000000000000000000000000000000000000..4e11f97d5a2ccd722a44211082cbd523a9bc4737 --- /dev/null +++ b/frame/contracts/tests/restoration.wat @@ -0,0 +1,56 @@ +(module + (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) + (import "env" "ext_restore_to" (func $ext_restore_to (param i32 i32 i32 i32 i32 i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func (export "call") + (call $ext_restore_to + ;; Pointer and length of the encoded dest buffer. + (i32.const 256) + (i32.const 8) + ;; Pointer and length of the encoded code hash buffer + (i32.const 264) + (i32.const 32) + ;; Pointer and length of the encoded rent_allowance buffer + (i32.const 296) + (i32.const 8) + ;; Pointer and number of items in the delta buffer. + ;; This buffer specifies multiple keys for removal before restoration. + (i32.const 100) + (i32.const 1) + ) + ) + (func (export "deploy") + ;; Data to restore + (call $ext_set_storage + (i32.const 0) + (i32.const 0) + (i32.const 4) + ) + + ;; ACL + (call $ext_set_storage + (i32.const 100) + (i32.const 0) + (i32.const 4) + ) + ) + + ;; Data to restore + (data (i32.const 0) "\28") + + ;; Buffer that has ACL storage keys. + (data (i32.const 100) "\01") + + ;; Address of bob + (data (i32.const 256) "\02\00\00\00\00\00\00\00") + + ;; Code hash of SET_RENT + (data (i32.const 264) + "\c2\1c\41\10\a5\22\d8\59\1c\4c\77\35\dd\2d\bf\a1" + "\13\0b\50\93\76\9b\92\31\97\b7\c5\74\26\aa\38\2a" + ) + + ;; Rent allowance + (data (i32.const 296) "\32\00\00\00\00\00\00\00") +) diff --git a/frame/contracts/tests/return_from_start_fn.wat b/frame/contracts/tests/return_from_start_fn.wat new file mode 100644 index 0000000000000000000000000000000000000000..ac898d4d944e9f3b08e9d2a079a8ecbdc9c77658 --- /dev/null +++ b/frame/contracts/tests/return_from_start_fn.wat @@ -0,0 +1,27 @@ +(module + (import "env" "ext_return" (func $ext_return (param i32 i32))) + (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + (start $start) + (func $start + (call $ext_deposit_event + (i32.const 0) ;; The topics buffer + (i32.const 0) ;; The topics buffer's length + (i32.const 8) ;; The data buffer + (i32.const 4) ;; The data buffer's length + ) + (call $ext_return + (i32.const 8) + (i32.const 4) + ) + (unreachable) + ) + + (func (export "call") + (unreachable) + ) + (func (export "deploy")) + + (data (i32.const 8) "\01\02\03\04") +) diff --git a/frame/contracts/tests/return_with_data.wat b/frame/contracts/tests/return_with_data.wat new file mode 100644 index 0000000000000000000000000000000000000000..8cc84006a0b00eff34536b5f861461cb045d570e --- /dev/null +++ b/frame/contracts/tests/return_with_data.wat @@ -0,0 +1,39 @@ +(module + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_scratch_write" (func $ext_scratch_write (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; Deploy routine is the same as call. + (func (export "deploy") (result i32) + (call $call) + ) + + ;; Call reads the first 4 bytes (LE) as the exit status and returns the rest as output data. 
+ (func $call (export "call") (result i32) + (local $buf_size i32) + (local $exit_status i32) + + ;; Find out the size of the scratch buffer + (set_local $buf_size (call $ext_scratch_size)) + + ;; Copy scratch buffer into this contract memory. + (call $ext_scratch_read + (i32.const 0) ;; The pointer where to store the scratch buffer contents, + (i32.const 0) ;; Offset from the start of the scratch buffer. + (get_local $buf_size) ;; Count of bytes to copy. + ) + + ;; Copy all but the first 4 bytes of the input data as the output data. + (call $ext_scratch_write + (i32.const 4) ;; Pointer to the data to return. + (i32.sub ;; Count of bytes to copy. + (get_local $buf_size) + (i32.const 4) + ) + ) + + ;; Return the first 4 bytes of the input data as the exit status. + (i32.load (i32.const 0)) + ) +) diff --git a/frame/contracts/tests/run_out_of_gas.wat b/frame/contracts/tests/run_out_of_gas.wat new file mode 100644 index 0000000000000000000000000000000000000000..52ee92539fd521d656235cc3f3feb555bcd12cb9 --- /dev/null +++ b/frame/contracts/tests/run_out_of_gas.wat @@ -0,0 +1,7 @@ +(module + (func (export "call") + (loop $inf (br $inf)) ;; just run out of gas + (unreachable) + ) + (func (export "deploy")) +) diff --git a/frame/contracts/tests/self_destruct.wat b/frame/contracts/tests/self_destruct.wat new file mode 100644 index 0000000000000000000000000000000000000000..464b5c663ea4a96f544a8f7c6534006c474c5aa1 --- /dev/null +++ b/frame/contracts/tests/self_destruct.wat @@ -0,0 +1,72 @@ +(module + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_address" (func $ext_address)) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "ext_terminate" (func $ext_terminate (param i32 i32))) + (import "env" "memory" (memory 1 1)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "deploy")) + + (func (export "call") + ;; If the input data is not empty, then recursively call self with empty input data. + ;; This should trap instead of self-destructing since a contract cannot be removed live in + ;; the execution stack cannot be removed. If the recursive call traps, then trap here as + ;; well. + (if (call $ext_scratch_size) + (then + (call $ext_address) + + ;; Expect address to be 8 bytes. + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 8) + ) + ) + + ;; Read own address into memory. + (call $ext_scratch_read + (i32.const 16) ;; Pointer to write address to + (i32.const 0) ;; Offset into scratch buffer + (i32.const 8) ;; Length of encoded address + ) + + ;; Recursively call self with empty input data. + (call $assert + (i32.eq + (call $ext_call + (i32.const 16) ;; Pointer to own address + (i32.const 8) ;; Length of own address + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer + (i32.const 0) ;; Pointer to input data buffer address + (i32.const 0) ;; Length of input data buffer + ) + (i32.const 0) + ) + ) + ) + (else + ;; Try to terminate and give balance to django. 
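+			;; (Account id 4, DJANGO in the tests, is stored at offset 32; see the data
+			;; segment at the end of this module.)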
+ (call $ext_terminate + (i32.const 32) ;; Pointer to beneficiary address + (i32.const 8) ;; Length of beneficiary address + ) + (unreachable) ;; ext_terminate never returns + ) + ) + ) + ;; Address of django + (data (i32.const 32) "\04\00\00\00\00\00\00\00") +) diff --git a/frame/contracts/tests/self_destructing_constructor.wat b/frame/contracts/tests/self_destructing_constructor.wat new file mode 100644 index 0000000000000000000000000000000000000000..b19d6e5b50daca2b37f20c7866134a02097c6285 --- /dev/null +++ b/frame/contracts/tests/self_destructing_constructor.wat @@ -0,0 +1,54 @@ +(module + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "ext_balance" (func $ext_balance)) + (import "env" "ext_call" (func $ext_call (param i32 i32 i64 i32 i32 i32 i32) (result i32))) + (import "env" "memory" (memory 1 1)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "deploy") + ;; Send entire remaining balance to the 0 address. + (call $ext_balance) + + ;; Balance should be encoded as a u64. + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 8) + ) + ) + + ;; Read balance into memory. + (call $ext_scratch_read + (i32.const 8) ;; Pointer to write balance to + (i32.const 0) ;; Offset into scratch buffer + (i32.const 8) ;; Length of encoded balance + ) + + ;; Self-destruct by sending full balance to the 0 address. + (call $assert + (i32.eq + (call $ext_call + (i32.const 0) ;; Pointer to destination address + (i32.const 8) ;; Length of destination address + (i64.const 0) ;; How much gas to devote for the execution. 0 = all. + (i32.const 8) ;; Pointer to the buffer with value to transfer + (i32.const 8) ;; Length of the buffer with value to transfer + (i32.const 0) ;; Pointer to input data buffer address + (i32.const 0) ;; Length of input data buffer + ) + (i32.const 0) + ) + ) + ) + + (func (export "call")) +) diff --git a/frame/contracts/tests/set_rent.wat b/frame/contracts/tests/set_rent.wat new file mode 100644 index 0000000000000000000000000000000000000000..d1affa0d7415f4257597cfac518b5cc7a72631e6 --- /dev/null +++ b/frame/contracts/tests/set_rent.wat @@ -0,0 +1,101 @@ +(module + (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) + (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) + (import "env" "ext_clear_storage" (func $ext_clear_storage (param i32))) + (import "env" "ext_set_rent_allowance" (func $ext_set_rent_allowance (param i32 i32))) + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "memory" (memory 1 1)) + + ;; insert a value of 4 bytes into storage + (func $call_0 + (call $ext_set_storage + (i32.const 1) + (i32.const 0) + (i32.const 4) + ) + ) + + ;; remove the value inserted by call_1 + (func $call_1 + (call $ext_clear_storage + (i32.const 1) + ) + ) + + ;; transfer 50 to ALICE + (func $call_2 + (call $ext_dispatch_call + (i32.const 68) + (i32.const 11) + ) + ) + + ;; do nothing + (func $call_else) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + ;; Dispatch the call according to input size + (func (export "call") + (local $input_size i32) + (set_local $input_size + (call $ext_scratch_size) + ) + (block $IF_ELSE + (block $IF_2 + (block $IF_1 + (block $IF_0 + 
(br_table $IF_0 $IF_1 $IF_2 $IF_ELSE + (get_local $input_size) + ) + (unreachable) + ) + (call $call_0) + return + ) + (call $call_1) + return + ) + (call $call_2) + return + ) + (call $call_else) + ) + + ;; Set into storage a 4 bytes value + ;; Set call set_rent_allowance with input + (func (export "deploy") + (local $input_size i32) + (set_local $input_size + (call $ext_scratch_size) + ) + (call $ext_set_storage + (i32.const 0) + (i32.const 0) + (i32.const 4) + ) + (call $ext_scratch_read + (i32.const 0) + (i32.const 0) + (get_local $input_size) + ) + (call $ext_set_rent_allowance + (i32.const 0) + (get_local $input_size) + ) + ) + + ;; Encoding of 10 in balance + (data (i32.const 0) "\28") + + ;; Encoding of call transfer 50 to CHARLIE + (data (i32.const 68) "\00\00\03\00\00\00\00\00\00\00\C8") +) diff --git a/frame/contracts/tests/storage_size.wat b/frame/contracts/tests/storage_size.wat new file mode 100644 index 0000000000000000000000000000000000000000..8de9f42ee97483fed59b8d84724ef431c3777728 --- /dev/null +++ b/frame/contracts/tests/storage_size.wat @@ -0,0 +1,60 @@ +(module + (import "env" "ext_get_storage" (func $ext_get_storage (param i32) (result i32))) + (import "env" "ext_set_storage" (func $ext_set_storage (param i32 i32 i32))) + (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) + (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) + (import "env" "memory" (memory 16 16)) + + (func $assert (param i32) + (block $ok + (br_if $ok + (get_local 0) + ) + (unreachable) + ) + ) + + (func (export "call") + ;; assert $ext_scratch_size == 8 + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.const 4) + ) + ) + + ;; copy contents of the scratch buffer into the contract's memory. + (call $ext_scratch_read + (i32.const 32) ;; Pointer in memory to the place where to copy. + (i32.const 0) ;; Offset from the start of the scratch buffer. + (i32.const 4) ;; Count of bytes to copy. + ) + + ;; place a garbage value in storage, the size of which is specified by the call input. 
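+		;; (Both the key pointer and the value pointer are 0, so the stored value is just
+		;; whatever bytes happen to be at the start of linear memory.)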
+ (call $ext_set_storage + (i32.const 0) ;; Pointer to storage key + (i32.const 0) ;; Pointer to value + (i32.load (i32.const 32)) ;; Size of value + ) + + (call $assert + (i32.eq + (call $ext_get_storage + (i32.const 0) ;; Pointer to storage key + ) + (i32.const 0) + ) + ) + + (call $assert + (i32.eq + (call $ext_scratch_size) + (i32.load (i32.const 32)) + ) + ) + ) + + (func (export "deploy")) + + (data (i32.const 0) "\01") ;; Storage key (32 B) +) diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index d2938956bd015333e28c7bbd67fc4a61a1bea282..788645764410b9bf35dcf969e1ee152a20716a64 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-democracy" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,17 +10,19 @@ description = "FRAME pallet for democracy" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } -sp-storage = { version = "2.0.0-alpha.2", path = "../../primitives/storage" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } +pallet-scheduler = { version = "2.0.0-alpha.5", path = "../scheduler" } +sp-storage = { version = "2.0.0-alpha.5", path = "../../primitives/storage" } hex-literal = "0.2.1" [features] @@ -30,7 +32,16 @@ std = [ "codec/std", "sp-std/std", "sp-io/std", + "frame-benchmarking/std", "frame-support/std", "sp-runtime/std", "frame-system/std", ] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-system/runtime-benchmarks", + "sp-runtime/runtime-benchmarks", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..6165a4f8977edd32e08d1077835de8db17f30bde --- /dev/null +++ b/frame/democracy/src/benchmarking.rs @@ -0,0 +1,488 @@ +// Copyright 2020 Parity Technologies 
(UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Democracy pallet benchmarking. + +use super::*; + +use frame_benchmarking::{benchmarks, account}; +use frame_support::traits::{Currency, Get, EnsureOrigin}; +use frame_system::{RawOrigin, Module as System, self}; +use sp_runtime::traits::{Bounded, One}; + +use crate::Module as Democracy; + +const SEED: u32 = 0; +const MAX_USERS: u32 = 1000; +const MAX_REFERENDUMS: u32 = 100; +const MAX_PROPOSALS: u32 = 100; +const MAX_SECONDERS: u32 = 100; +const MAX_VETOERS: u32 = 100; +const MAX_BYTES: u32 = 16_384; + +fn funded_account(name: &'static str, index: u32) -> T::AccountId { + let caller: T::AccountId = account(name, index, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + caller +} + +fn add_proposal(n: u32) -> Result { + let other = funded_account::("proposer", n); + let value = T::MinimumDeposit::get(); + let proposal_hash: T::Hash = T::Hashing::hash_of(&n); + + Democracy::::propose(RawOrigin::Signed(other).into(), proposal_hash, value.into())?; + + Ok(proposal_hash) +} + +fn add_referendum(n: u32) -> Result { + let proposal_hash = add_proposal::(n)?; + let vote_threshold = VoteThreshold::SimpleMajority; + + Democracy::::inject_referendum( + 0.into(), + proposal_hash, + vote_threshold, + 0.into(), + ); + let referendum_index: ReferendumIndex = ReferendumCount::get() - 1; + let _ = T::Scheduler::schedule_named( + (DEMOCRACY_ID, referendum_index), + 0.into(), + None, + 63, + Call::enact_proposal(proposal_hash, referendum_index).into(), + ); + Ok(referendum_index) +} + +fn account_vote() -> AccountVote> { + let v = Vote { + aye: true, + conviction: Conviction::Locked1x, + }; + + AccountVote::Standard { + vote: v, + balance: BalanceOf::::one(), + } +} + +fn open_activate_proxy(u: u32) -> Result { + let caller = funded_account::("caller", u); + let proxy = funded_account::("proxy", u); + + Democracy::::open_proxy(RawOrigin::Signed(proxy.clone()).into(), caller.clone())?; + Democracy::::activate_proxy(RawOrigin::Signed(caller).into(), proxy.clone())?; + + Ok(proxy) +} + +benchmarks! { + _ { } + + propose { + let p in 1 .. MAX_PROPOSALS; + + // Add p proposals + for i in 0 .. p { + add_proposal::(i)?; + } + + let caller = funded_account::("caller", 0); + let proposal_hash: T::Hash = T::Hashing::hash_of(&p); + let value = T::MinimumDeposit::get(); + }: _(RawOrigin::Signed(caller), proposal_hash, value.into()) + + second { + let s in 0 .. MAX_SECONDERS; + + let caller = funded_account::("caller", 0); + let proposal_hash = add_proposal::(s)?; + + // Create s existing "seconds" + for i in 0 .. s { + let seconder = funded_account::("seconder", i); + Democracy::::second(RawOrigin::Signed(seconder).into(), 0)?; + } + + }: _(RawOrigin::Signed(caller), 0) + + vote { + let r in 1 .. MAX_REFERENDUMS; + + let caller = funded_account::("caller", 0); + let account_vote = account_vote::(); + + for i in 0 .. 
r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; + } + + let referendum_index = r - 1; + + }: _(RawOrigin::Signed(caller), referendum_index, account_vote) + + proxy_vote { + let r in 1 .. MAX_REFERENDUMS; + + let caller = funded_account::("caller", r); + let proxy = open_activate_proxy::(r)?; + let account_vote = account_vote::(); + + for i in 0 .. r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; + } + + let referendum_index = r - 1; + + }: _(RawOrigin::Signed(proxy), referendum_index, account_vote) + + emergency_cancel { + let u in 1 .. MAX_USERS; + + let referendum_index = add_referendum::(u)?; + let origin = T::CancellationOrigin::successful_origin(); + let call = Call::::emergency_cancel(referendum_index); + }: { + let _ = call.dispatch(origin)?; + } + + external_propose { + let u in 1 .. MAX_USERS; + + let origin = T::ExternalOrigin::successful_origin(); + let proposal_hash = T::Hashing::hash_of(&u); + let call = Call::::external_propose(proposal_hash); + }: { + let _ = call.dispatch(origin)?; + } + + external_propose_majority { + let u in 1 .. MAX_USERS; + + let origin = T::ExternalMajorityOrigin::successful_origin(); + let proposal_hash = T::Hashing::hash_of(&u); + let call = Call::::external_propose_majority(proposal_hash); + + }: { + let _ = call.dispatch(origin)?; + } + + external_propose_default { + let u in 1 .. MAX_USERS; + + let origin = T::ExternalDefaultOrigin::successful_origin(); + let proposal_hash = T::Hashing::hash_of(&u); + let call = Call::::external_propose_default(proposal_hash); + + }: { + let _ = call.dispatch(origin)?; + } + + fast_track { + let u in 1 .. MAX_USERS; + + let origin_propose = T::ExternalDefaultOrigin::successful_origin(); + let proposal_hash: T::Hash = T::Hashing::hash_of(&u); + Democracy::::external_propose_default(origin_propose, proposal_hash.clone())?; + + let origin_fast_track = T::FastTrackOrigin::successful_origin(); + let voting_period = T::FastTrackVotingPeriod::get(); + let delay = 0; + let call = Call::::fast_track(proposal_hash, voting_period.into(), delay.into()); + + }: { + let _ = call.dispatch(origin_fast_track)?; + } + + veto_external { + // Existing veto-ers + let v in 0 .. MAX_VETOERS; + + let proposal_hash: T::Hash = T::Hashing::hash_of(&v); + + let origin_propose = T::ExternalDefaultOrigin::successful_origin(); + Democracy::::external_propose_default(origin_propose, proposal_hash.clone())?; + + let mut vetoers: Vec = Vec::new(); + for i in 0 .. v { + vetoers.push(account("vetoer", i, SEED)); + } + Blacklist::::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); + + let call = Call::::veto_external(proposal_hash); + let origin = T::VetoOrigin::successful_origin(); + }: { + let _ = call.dispatch(origin)?; + } + + cancel_referendum { + let u in 1 .. MAX_USERS; + + let referendum_index = add_referendum::(u)?; + }: _(RawOrigin::Root, referendum_index) + + cancel_queued { + let u in 1 .. MAX_USERS; + + let referendum_index = add_referendum::(u)?; + }: _(RawOrigin::Root, referendum_index) + + open_proxy { + let u in 1 .. MAX_USERS; + + let caller: T::AccountId = funded_account::("caller", u); + let proxy: T::AccountId = funded_account::("proxy", u); + + }: _(RawOrigin::Signed(proxy), caller) + + activate_proxy { + let u in 1 .. 
MAX_USERS; + + let caller: T::AccountId = funded_account::("caller", u); + let proxy: T::AccountId = funded_account::("proxy", u); + Democracy::::open_proxy(RawOrigin::Signed(proxy.clone()).into(), caller.clone())?; + + }: _(RawOrigin::Signed(caller), proxy) + + close_proxy { + let u in 1 .. MAX_USERS; + + let proxy = open_activate_proxy::(u)?; + + }: _(RawOrigin::Signed(proxy)) + + deactivate_proxy { + let u in 1 .. MAX_USERS; + + let caller = funded_account::("caller", u); + let proxy = open_activate_proxy::(u)?; + + }: _(RawOrigin::Signed(caller), proxy) + + delegate { + let u in 1 .. MAX_USERS; + + let caller = funded_account::("caller", u); + let d: T::AccountId = funded_account::("delegate", u); + let balance = 1u32; + + }: _(RawOrigin::Signed(caller), d.into(), Conviction::Locked1x, balance.into()) + + undelegate { + let r in 1 .. MAX_REFERENDUMS; + + let other = funded_account::("other", 0); + let account_vote = account_vote::(); + + for i in 0 .. r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; + } + + let delegator = funded_account::("delegator", r); + let conviction = Conviction::Locked1x; + let balance = 1u32; + + Democracy::::delegate(RawOrigin::Signed(delegator.clone()).into(), other.clone().into(), conviction, balance.into())?; + + }: _(RawOrigin::Signed(delegator)) + + clear_public_proposals { + let p in 0 .. MAX_PROPOSALS; + + for i in 0 .. p { + add_proposal::(i)?; + } + + }: _(RawOrigin::Root) + + note_preimage { + // Num of bytes in encoded proposal + let b in 0 .. MAX_BYTES; + + let caller = funded_account::("caller", b); + let encoded_proposal = vec![0; b as usize]; + }: _(RawOrigin::Signed(caller), encoded_proposal) + + note_imminent_preimage { + // Num of bytes in encoded proposal + let b in 0 .. MAX_BYTES; + + // d + 1 to include the one we are testing + let encoded_proposal = vec![0; b as usize]; + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + let block_number = T::BlockNumber::one(); + Preimages::::insert(&proposal_hash, PreimageStatus::Missing(block_number)); + + let caller = funded_account::("caller", b); + let encoded_proposal = vec![0; b as usize]; + }: _(RawOrigin::Signed(caller), encoded_proposal) + + reap_preimage { + // Num of bytes in encoded proposal + let b in 0 .. MAX_BYTES; + + let encoded_proposal = vec![0; b as usize]; + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + + let caller = funded_account::("caller", b); + Democracy::::note_preimage(RawOrigin::Signed(caller.clone()).into(), encoded_proposal.clone())?; + + // We need to set this otherwise we get `Early` error. + let block_number = T::VotingPeriod::get() + T::EnactmentPeriod::get() + T::BlockNumber::one(); + System::::set_block_number(block_number.into()); + + }: _(RawOrigin::Signed(caller), proposal_hash) + + unlock { + let u in 1 .. MAX_USERS; + + let caller = funded_account::("caller", u); + let locked_until = T::BlockNumber::zero(); + Locks::::insert(&caller, locked_until); + + T::Currency::extend_lock( + DEMOCRACY_ID, + &caller, + Bounded::max_value(), + WithdrawReason::Transfer.into() + ); + + let other = caller.clone(); + + }: _(RawOrigin::Signed(caller), other) + + remove_vote { + let r in 1 .. MAX_REFERENDUMS; + + let caller = funded_account::("caller", 0); + let account_vote = account_vote::(); + + for i in 0 .. 
r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; + } + + let referendum_index = r - 1; + + }: _(RawOrigin::Signed(caller), referendum_index) + + remove_other_vote { + let r in 1 .. MAX_REFERENDUMS; + + let other = funded_account::("other", r); + let account_vote = account_vote::(); + + for i in 0 .. r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; + } + + let referendum_index = r - 1; + ReferendumInfoOf::::insert( + referendum_index, + ReferendumInfo::Finished { end: T::BlockNumber::zero(), approved: true } + ); + let caller = funded_account::("caller", r); + + System::::set_block_number(T::EnactmentPeriod::get() * 10u32.into()); + + }: _(RawOrigin::Signed(caller), other, referendum_index) + + proxy_delegate { + let u in 1 .. MAX_USERS; + + let other: T::AccountId = account("other", u, SEED); + let proxy = open_activate_proxy::(u)?; + let conviction = Conviction::Locked1x; + let balance = 1u32; + + }: _(RawOrigin::Signed(proxy), other, conviction, balance.into()) + + proxy_undelegate { + let r in 1 .. MAX_REFERENDUMS; + + let other = funded_account::("other", 0); + let account_vote = account_vote::(); + + for i in 0 .. r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; + } + + let proxy = open_activate_proxy::(r)?; + let conviction = Conviction::Locked1x; + let balance = 1u32; + Democracy::::proxy_delegate(RawOrigin::Signed(proxy.clone()).into(), other, conviction, balance.into())?; + + }: _(RawOrigin::Signed(proxy)) + + proxy_remove_vote { + let u in 1 .. MAX_USERS; + + let referendum_index = add_referendum::(u)?; + let account_vote = account_vote::(); + let proxy = open_activate_proxy::(u)?; + + Democracy::::proxy_vote(RawOrigin::Signed(proxy.clone()).into(), referendum_index, account_vote)?; + + }: _(RawOrigin::Signed(proxy), referendum_index) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_propose::()); + assert_ok!(test_benchmark_second::()); + assert_ok!(test_benchmark_vote::()); + assert_ok!(test_benchmark_proxy_vote::()); + assert_ok!(test_benchmark_emergency_cancel::()); + assert_ok!(test_benchmark_external_propose::()); + assert_ok!(test_benchmark_external_propose_majority::()); + assert_ok!(test_benchmark_external_propose_default::()); + assert_ok!(test_benchmark_fast_track::()); + assert_ok!(test_benchmark_veto_external::()); + assert_ok!(test_benchmark_cancel_referendum::()); + assert_ok!(test_benchmark_cancel_queued::()); + assert_ok!(test_benchmark_open_proxy::()); + assert_ok!(test_benchmark_activate_proxy::()); + assert_ok!(test_benchmark_close_proxy::()); + assert_ok!(test_benchmark_deactivate_proxy::()); + assert_ok!(test_benchmark_delegate::()); + assert_ok!(test_benchmark_undelegate::()); + assert_ok!(test_benchmark_clear_public_proposals::()); + assert_ok!(test_benchmark_note_preimage::()); + assert_ok!(test_benchmark_note_imminent_preimage::()); + assert_ok!(test_benchmark_reap_preimage::()); + assert_ok!(test_benchmark_unlock::()); + assert_ok!(test_benchmark_remove_vote::()); + assert_ok!(test_benchmark_remove_other_vote::()); + assert_ok!(test_benchmark_proxy_delegate::()); + assert_ok!(test_benchmark_proxy_undelegate::()); + 
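+        // Each `test_benchmark_*` function called here is generated by the
+        // `benchmarks!` macro above, one per benchmark, and exercises that
+        // benchmark's setup and call inside the mock runtime from `crate::tests`.
+        // Since the whole `benchmarking` module is gated on the
+        // `runtime-benchmarks` feature, these tests only build when that feature
+        // is enabled, roughly:
+        //   cargo test -p pallet-democracy --features runtime-benchmarks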
assert_ok!(test_benchmark_proxy_remove_vote::()); + }); + } +} diff --git a/frame/democracy/src/conviction.rs b/frame/democracy/src/conviction.rs new file mode 100644 index 0000000000000000000000000000000000000000..a057ee2a357503d0a51648874b92a9006c3d1087 --- /dev/null +++ b/frame/democracy/src/conviction.rs @@ -0,0 +1,113 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The conviction datatype. + +use sp_std::{result::Result, convert::TryFrom}; +use sp_runtime::{RuntimeDebug, traits::{Zero, Bounded, CheckedMul, CheckedDiv}}; +use codec::{Encode, Decode}; +use crate::types::Delegations; + +/// A value denoting the strength of conviction of a vote. +#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] +pub enum Conviction { + /// 0.1x votes, unlocked. + None, + /// 1x votes, locked for an enactment period following a successful vote. + Locked1x, + /// 2x votes, locked for 2x enactment periods following a successful vote. + Locked2x, + /// 3x votes, locked for 4x... + Locked3x, + /// 4x votes, locked for 8x... + Locked4x, + /// 5x votes, locked for 16x... + Locked5x, + /// 6x votes, locked for 32x... + Locked6x, +} + +impl Default for Conviction { + fn default() -> Self { + Conviction::None + } +} + +impl From for u8 { + fn from(c: Conviction) -> u8 { + match c { + Conviction::None => 0, + Conviction::Locked1x => 1, + Conviction::Locked2x => 2, + Conviction::Locked3x => 3, + Conviction::Locked4x => 4, + Conviction::Locked5x => 5, + Conviction::Locked6x => 6, + } + } +} + +impl TryFrom for Conviction { + type Error = (); + fn try_from(i: u8) -> Result { + Ok(match i { + 0 => Conviction::None, + 1 => Conviction::Locked1x, + 2 => Conviction::Locked2x, + 3 => Conviction::Locked3x, + 4 => Conviction::Locked4x, + 5 => Conviction::Locked5x, + 6 => Conviction::Locked6x, + _ => return Err(()), + }) + } +} + +impl Conviction { + /// The amount of time (in number of periods) that our conviction implies a successful voter's + /// balance should be locked for. + pub fn lock_periods(self) -> u32 { + match self { + Conviction::None => 0, + Conviction::Locked1x => 1, + Conviction::Locked2x => 2, + Conviction::Locked3x => 4, + Conviction::Locked4x => 8, + Conviction::Locked5x => 16, + Conviction::Locked6x => 32, + } + } + + /// The votes of a voter of the given `balance` with our conviction. 
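+	// Illustrative numbers for `votes` below: with a capital of 100 units,
+	// `Conviction::None` yields 10 votes (capital / 10) while
+	// `Conviction::Locked3x` yields 300 votes; in every case the full 100 units
+	// are carried through as `capital` in the returned `Delegations`.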
+ pub fn votes< + B: From + Zero + Copy + CheckedMul + CheckedDiv + Bounded + >(self, capital: B) -> Delegations { + let votes = match self { + Conviction::None => capital.checked_div(&10u8.into()).unwrap_or_else(Zero::zero), + x => capital.checked_mul(&u8::from(x).into()).unwrap_or_else(B::max_value), + }; + Delegations { votes, capital } + } +} + +impl Bounded for Conviction { + fn min_value() -> Self { + Conviction::None + } + fn max_value() -> Self { + Conviction::Locked6x + } +} diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index 2418bc7c3d89a7275040084ee80d8644a02652e1..b09f305c642477cc277653a5ddd33be68a4a9766 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -51,9 +51,10 @@ //! account or an external origin) suggests that the system adopt. //! - **Referendum:** A proposal that is in the process of being voted on for //! either acceptance or rejection as a change to the system. -//! - **Proxy:** An account that votes on behalf of a separate "Stash" account +//! - **Proxy:** An account that has full voting power on behalf of a separate "Stash" account //! that holds the funds. -//! - **Delegation:** The act of granting your voting power to the decisions of another account. +//! - **Delegation:** The act of granting your voting power to the decisions of another account for +//! up to a certain conviction. //! //! ### Adaptive Quorum Biasing //! @@ -77,21 +78,33 @@ //! These calls can be made from any externally held account capable of creating //! a signed extrinsic. //! -//! - `propose` - Submits a sensitive action, represented as a hash. -//! Requires a deposit. -//! - `second` - Signals agreement with a proposal, moves it higher on the -//! proposal queue, and requires a matching deposit to the original. -//! - `vote` - Votes in a referendum, either the vote is "Aye" to enact the -//! proposal or "Nay" to keep the status quo. -//! - `proxy_vote` - Votes in a referendum on behalf of a stash account. +//! Basic actions: +//! - `propose` - Submits a sensitive action, represented as a hash. Requires a deposit. +//! - `second` - Signals agreement with a proposal, moves it higher on the proposal queue, and +//! requires a matching deposit to the original. +//! - `vote` - Votes in a referendum, either the vote is "Aye" to enact the proposal or "Nay" to +//! keep the status quo. +//! - `unvote` - Cancel a previous vote, this must be done by the voter before the vote ends. +//! - `delegate` - Delegates the voting power (tokens * conviction) to another account. +//! - `undelegate` - Stops the delegation of voting power to another account. +//! +//! Administration actions that can be done to any account: +//! - `reap_vote` - Remove some account's expired votes. +//! - `unlock` - Redetermine the account's balance lock, potentially making tokens available. +//! +//! Proxy administration: //! - `activate_proxy` - Activates a proxy that is already open to the sender. //! - `close_proxy` - Clears the proxy status, called by the proxy. -//! - `deactivate_proxy` - Deactivates a proxy back to the open status, called by -//! the stash. +//! - `deactivate_proxy` - Deactivates a proxy back to the open status, called by the stash. //! - `open_proxy` - Opens a proxy account on behalf of the sender. -//! - `delegate` - Delegates the voting power (tokens * conviction) to another -//! account. -//! - `undelegate` - Stops the delegation of voting power to another account. +//! +//! Proxy actions: +//! 
- `proxy_vote` - Votes in a referendum on behalf of a stash account. +//! - `proxy_unvote` - Cancel a previous vote, done on behalf of the voter by a proxy. +//! - `proxy_delegate` - Delegate voting power, done on behalf of the voter by a proxy. +//! - `proxy_undelegate` - Stop delegating voting power, done on behalf of the voter by a proxy. +//! +//! Preimage actions: //! - `note_preimage` - Registers the preimage for an upcoming proposal, requires //! a deposit that is returned once the proposal is enacted. //! - `note_imminent_preimage` - Registers the preimage for an upcoming proposal. @@ -99,7 +112,6 @@ //! - `reap_preimage` - Removes the preimage for an expired proposal. Will only //! work under the condition that it's the same account that noted it and //! after the voting period, OR it's a different account after the enactment period. -//! - `unlock` - Unlocks tokens that have an expired lock. //! //! #### Cancellation Origin //! @@ -152,24 +164,35 @@ #![cfg_attr(not(feature = "std"), no_std)] use sp_std::prelude::*; -use sp_std::{result, convert::TryFrom}; use sp_runtime::{ - RuntimeDebug, DispatchResult, - traits::{Zero, Bounded, CheckedMul, CheckedDiv, EnsureOrigin, Hash, Dispatchable, Saturating}, + DispatchResult, DispatchError, RuntimeDebug, + traits::{Zero, Hash, Dispatchable, Saturating}, }; -use codec::{Ref, Encode, Decode, Input, Output}; +use codec::{Ref, Encode, Decode}; use frame_support::{ decl_module, decl_storage, decl_event, decl_error, ensure, Parameter, - weights::SimpleDispatchInfo, + weights::{SimpleDispatchInfo, Weight, WeighData}, traits::{ Currency, ReservableCurrency, LockableCurrency, WithdrawReason, LockIdentifier, Get, - OnUnbalanced, BalanceStatus + OnUnbalanced, BalanceStatus, schedule::Named as ScheduleNamed, EnsureOrigin } }; use frame_system::{self as system, ensure_signed, ensure_root}; mod vote_threshold; +mod vote; +mod conviction; +mod types; pub use vote_threshold::{Approved, VoteThreshold}; +pub use vote::{Vote, AccountVote, Voting}; +pub use conviction::Conviction; +pub use types::{ReferendumInfo, ReferendumStatus, ProxyState, Tally, UnvoteScope, Delegations}; + +#[cfg(test)] +mod tests; + +#[cfg(feature = "runtime-benchmarks")] +pub mod benchmarking; const DEMOCRACY_ID: LockIdentifier = *b"democrac"; @@ -179,137 +202,12 @@ pub type PropIndex = u32; /// A referendum index. pub type ReferendumIndex = u32; -/// A value denoting the strength of conviction of a vote. -#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] -pub enum Conviction { - /// 0.1x votes, unlocked. - None, - /// 1x votes, locked for an enactment period following a successful vote. - Locked1x, - /// 2x votes, locked for 2x enactment periods following a successful vote. - Locked2x, - /// 3x votes, locked for 4x... - Locked3x, - /// 4x votes, locked for 8x... - Locked4x, - /// 5x votes, locked for 16x... - Locked5x, - /// 6x votes, locked for 32x... 
- Locked6x, -} - -impl Default for Conviction { - fn default() -> Self { - Conviction::None - } -} - -impl From for u8 { - fn from(c: Conviction) -> u8 { - match c { - Conviction::None => 0, - Conviction::Locked1x => 1, - Conviction::Locked2x => 2, - Conviction::Locked3x => 3, - Conviction::Locked4x => 4, - Conviction::Locked5x => 5, - Conviction::Locked6x => 6, - } - } -} - -impl TryFrom for Conviction { - type Error = (); - fn try_from(i: u8) -> result::Result { - Ok(match i { - 0 => Conviction::None, - 1 => Conviction::Locked1x, - 2 => Conviction::Locked2x, - 3 => Conviction::Locked3x, - 4 => Conviction::Locked4x, - 5 => Conviction::Locked5x, - 6 => Conviction::Locked6x, - _ => return Err(()), - }) - } -} - -impl Conviction { - /// The amount of time (in number of periods) that our conviction implies a successful voter's - /// balance should be locked for. - fn lock_periods(self) -> u32 { - match self { - Conviction::None => 0, - Conviction::Locked1x => 1, - Conviction::Locked2x => 2, - Conviction::Locked3x => 4, - Conviction::Locked4x => 8, - Conviction::Locked5x => 16, - Conviction::Locked6x => 32, - } - } - - /// The votes of a voter of the given `balance` with our conviction. - fn votes< - B: From + Zero + Copy + CheckedMul + CheckedDiv + Bounded - >(self, balance: B) -> (B, B) { - match self { - Conviction::None => { - let r = balance.checked_div(&10u8.into()).unwrap_or_else(Zero::zero); - (r, r) - } - x => ( - balance.checked_mul(&u8::from(x).into()).unwrap_or_else(B::max_value), - balance, - ) - } - } -} - -impl Bounded for Conviction { - fn min_value() -> Self { - Conviction::None - } - - fn max_value() -> Self { - Conviction::Locked6x - } -} - -const MAX_RECURSION_LIMIT: u32 = 16; - -/// A number of lock periods, plus a vote, one way or the other. -#[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug)] -pub struct Vote { - pub aye: bool, - pub conviction: Conviction, -} - -impl Encode for Vote { - fn encode_to(&self, output: &mut T) { - output.push_byte(u8::from(self.conviction) | if self.aye { 0b1000_0000 } else { 0 }); - } -} - -impl codec::EncodeLike for Vote {} - -impl Decode for Vote { - fn decode(input: &mut I) -> core::result::Result { - let b = input.read_byte()?; - Ok(Vote { - aye: (b & 0b1000_0000) == 0b1000_0000, - conviction: Conviction::try_from(b & 0b0111_1111) - .map_err(|_| codec::Error::from("Invalid conviction"))?, - }) - } -} - type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = -<::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: frame_system::Trait + Sized { - type Proposal: Parameter + Dispatchable; + type Proposal: Parameter + Dispatchable + From>; type Event: From> + Into<::Event>; /// Currency type for this module. @@ -344,13 +242,23 @@ pub trait Trait: frame_system::Trait + Sized { /// a negative-turnout-bias (default-carries) referendum. type ExternalDefaultOrigin: EnsureOrigin; - /// Origin from which the next referendum proposed by the external majority may be immediately - /// tabled to vote asynchronously in a similar manner to the emergency origin. It remains a - /// majority-carries vote. + /// Origin from which the next majority-carries (or more permissive) referendum may be tabled to + /// vote according to the `FastTrackVotingPeriod` asynchronously in a similar manner to the + /// emergency origin. It retains its threshold method. 
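+	// Configuration note: `EmergencyVotingPeriod` is replaced below by
+	// `FastTrackVotingPeriod`, and a new `InstantOrigin` gated by `InstantAllowed`
+	// is introduced for fast-tracked referenda whose voting period is shorter
+	// than `FastTrackVotingPeriod` (see `fast_track`).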
type FastTrackOrigin: EnsureOrigin; - /// Minimum voting period allowed for an fast-track/emergency referendum. - type EmergencyVotingPeriod: Get; + /// Origin from which the next majority-carries (or more permissive) referendum may be tabled to + /// vote immediately and asynchronously in a similar manner to the emergency origin. It retains + /// its threshold method. + type InstantOrigin: EnsureOrigin; + + /// Indicator for whether an emergency origin is even allowed to happen. Some chains may want + /// to set this permanently to `false`, others may want to condition it on things such as + /// an upgrade having happened recently. + type InstantAllowed: Get; + + /// Minimum voting period allowed for a fast-track referendum. + type FastTrackVotingPeriod: Get; /// Origin from which any referendum may be cancelled in an emergency. type CancellationOrigin: EnsureOrigin; @@ -366,102 +274,84 @@ pub trait Trait: frame_system::Trait + Sized { /// Handler for the unbalanced reduction when slashing a preimage deposit. type Slash: OnUnbalanced>; -} - -/// Info regarding an ongoing referendum. -#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct ReferendumInfo { - /// When voting on this referendum will end. - end: BlockNumber, - /// The hash of the proposal being voted on. - proposal_hash: Hash, - /// The thresholding mechanism to determine whether it passed. - threshold: VoteThreshold, - /// The delay (in blocks) to wait after a successful referendum before deploying. - delay: BlockNumber, -} -impl ReferendumInfo { - /// Create a new instance. - pub fn new( - end: BlockNumber, - proposal_hash: Hash, - threshold: VoteThreshold, - delay: BlockNumber - ) -> Self { - ReferendumInfo { end, proposal_hash, threshold, delay } - } + /// The Scheduler. + type Scheduler: ScheduleNamed; } -/// State of a proxy voting account. -#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] -pub enum ProxyState { - /// Account is open to becoming a proxy but is not yet assigned. - Open(AccountId), - /// Account is actively being a proxy. - Active(AccountId), +#[derive(Clone, Encode, Decode, RuntimeDebug)] +pub enum PreimageStatus { + /// The preimage is imminently needed at the argument. + Missing(BlockNumber), + /// The preimage is available. + Available { + data: Vec, + provider: AccountId, + deposit: Balance, + since: BlockNumber, + /// None if it's not imminent. + expiry: Option, + }, } -impl ProxyState { - fn as_active(self) -> Option { +impl PreimageStatus { + fn to_missing_expiry(self) -> Option { match self { - ProxyState::Active(a) => Some(a), - ProxyState::Open(_) => None, + PreimageStatus::Missing(expiry) => Some(expiry), + _ => None, } } } decl_storage! { trait Store for Module as Democracy { + // TODO: Refactor public proposal queue into its own pallet. + // https://github.com/paritytech/substrate/issues/5322 /// The number of (public) proposals that have been made so far. pub PublicPropCount get(fn public_prop_count) build(|_| 0 as PropIndex) : PropIndex; /// The public proposals. Unsorted. The second item is the proposal's hash. pub PublicProps get(fn public_props): Vec<(PropIndex, T::Hash, T::AccountId)>; + /// Those who have locked a deposit. + pub DepositOf get(fn deposit_of): + map hasher(twox_64_concat) PropIndex => Option<(BalanceOf, Vec)>; + /// Map of hashes to the proposal preimage, along with who registered it and their deposit. /// The block number is the block at which it was deposited. + // TODO: Refactor Preimages into its own pallet. 
+ // https://github.com/paritytech/substrate/issues/5322 pub Preimages: - map hasher(blake2_256) T::Hash - => Option<(Vec, T::AccountId, BalanceOf, T::BlockNumber)>; - /// Those who have locked a deposit. - pub DepositOf get(fn deposit_of): - map hasher(blake2_256) PropIndex => Option<(BalanceOf, Vec)>; + map hasher(identity) T::Hash + => Option, T::BlockNumber>>; /// The next free referendum index, aka the number of referenda started so far. pub ReferendumCount get(fn referendum_count) build(|_| 0 as ReferendumIndex): ReferendumIndex; /// The lowest referendum index representing an unbaked referendum. Equal to /// `ReferendumCount` if there isn't a unbaked referendum. pub LowestUnbaked get(fn lowest_unbaked) build(|_| 0 as ReferendumIndex): ReferendumIndex; + /// Information concerning any given referendum. pub ReferendumInfoOf get(fn referendum_info): - map hasher(blake2_256) ReferendumIndex - => Option>; - /// Queue of successful referenda to be dispatched. Stored ordered by block number. - pub DispatchQueue get(fn dispatch_queue): Vec<(T::BlockNumber, T::Hash, ReferendumIndex)>; + map hasher(twox_64_concat) ReferendumIndex + => Option>>; - /// Get the voters for the current proposal. - pub VotersFor get(fn voters_for): - map hasher(blake2_256) ReferendumIndex => Vec; - - /// Get the vote in a given referendum of a particular voter. The result is meaningful only - /// if `voters_for` includes the voter when called with the referendum (you'll get the - /// default `Vote` value otherwise). If you don't want to check `voters_for`, then you can - /// also check for simple existence with `VoteOf::contains_key` first. - pub VoteOf get(fn vote_of): map hasher(blake2_256) (ReferendumIndex, T::AccountId) => Vote; + /// All votes for a particular voter. We store the balance for the number of votes that we + /// have recorded. The second item is the total amount of delegations, that will be added. + pub VotingOf: map hasher(twox_64_concat) T::AccountId => Voting, T::AccountId, T::BlockNumber>; /// Who is able to vote for whom. Value is the fund-holding account, key is the /// vote-transaction-sending account. - pub Proxy get(fn proxy): map hasher(blake2_256) T::AccountId => Option>; - - /// Get the account (and lock periods) to which another account is delegating vote. - pub Delegations get(fn delegations): - linked_map hasher(blake2_256) T::AccountId => (T::AccountId, Conviction); + // TODO: Refactor proxy into its own pallet. + // https://github.com/paritytech/substrate/issues/5322 + pub Proxy get(fn proxy): map hasher(twox_64_concat) T::AccountId => Option>; /// Accounts for which there are locks in action which may be removed at some point in the /// future. The value is the block number at which the lock expires and may be removed. - pub Locks get(locks): map hasher(blake2_256) T::AccountId => Option; + pub Locks get(locks): map hasher(twox_64_concat) T::AccountId => Option; /// True if the last referendum tabled was submitted externally. False if it was a public /// proposal. + // TODO: There should be any number of tabling origins, not just public and "external" (council). + // https://github.com/paritytech/substrate/issues/5322 pub LastTabledWasExternal: bool; /// The referendum to be tabled whenever it would be valid to table an external proposal. @@ -473,10 +363,10 @@ decl_storage! { /// A record of who vetoed what. Maps proposal hash to a possible existent block number /// (until when it may not be resubmitted) and who vetoed it. 
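+		// Hasher note for this storage rewrite: keys that are already cryptographic
+		// hashes (`T::Hash` in `Preimages`, `Blacklist` and `Cancellations`) use
+		// `hasher(identity)`, while plain indices and account ids use
+		// `hasher(twox_64_concat)` in place of the previous `blake2_256`.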
pub Blacklist get(fn blacklist): - map hasher(blake2_256) T::Hash => Option<(T::BlockNumber, Vec)>; + map hasher(identity) T::Hash => Option<(T::BlockNumber, Vec)>; /// Record of all proposals that have been subject to emergency cancellation. - pub Cancellations: map hasher(blake2_256) T::Hash => bool; + pub Cancellations: map hasher(identity) T::Hash => bool; } } @@ -559,7 +449,7 @@ decl_error! { /// Not imminent NotImminent, /// Too early - Early, + TooEarly, /// Imminent Imminent, /// Preimage not found @@ -579,13 +469,35 @@ decl_error! { /// A proxy-pairing was attempted to an account that was open to another account. WrongOpen, /// A proxy-de-pairing was attempted to an account that was not active. - NotActive + NotActive, + /// The given account did not vote on the referendum. + NotVoter, + /// The actor has no permission to conduct the action. + NoPermission, + /// The account is already delegating. + AlreadyDelegating, + /// An unexpected integer overflow occurred. + Overflow, + /// An unexpected integer underflow occurred. + Underflow, + /// Too high a balance was provided that the account cannot afford. + InsufficientFunds, + /// The account is not currently delegating. + NotDelegating, + /// The account currently has votes attached to it and the operation cannot succeed until + /// these are removed, either through `unvote` or `reap_vote`. + VotesExist, + /// The instant referendum origin is currently disallowed. + InstantNotAllowed, + /// Delegation to oneself makes no sense. + Nonsense, } } decl_module! { pub struct Module for enum Call where origin: T::Origin { type Error = Error; + /// The minimum period of locking and the period between a proposal being approved and enacted. /// /// It should generally be a little more than the unstake period to ensure that @@ -603,7 +515,7 @@ decl_module! { const MinimumDeposit: BalanceOf = T::MinimumDeposit::get(); /// Minimum voting period allowed for an emergency referendum. - const EmergencyVotingPeriod: T::BlockNumber = T::EmergencyVotingPeriod::get(); + const FastTrackVotingPeriod: T::BlockNumber = T::FastTrackVotingPeriod::get(); /// Period in blocks where an external proposal may not be re-submitted after being vetoed. const CooloffPeriod: T::BlockNumber = T::CooloffPeriod::get(); @@ -613,6 +525,12 @@ decl_module! { fn deposit_event() = default; + fn on_runtime_upgrade() -> Weight { + Self::migrate(); + + SimpleDispatchInfo::default().weigh_data(()) + } + /// Propose a sensitive action to be taken. /// /// The dispatch origin of this call must be _Signed_ and the sender must @@ -624,7 +542,8 @@ decl_module! { /// Emits `Proposed`. /// /// # - /// - `O(1)`. + /// - `O(P)` + /// - P is the number proposals in the `PublicProps` vec. /// - Two DB changes, one DB entry. /// # #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] @@ -654,7 +573,8 @@ decl_module! { /// - `proposal`: The index of the proposal to second. /// /// # - /// - `O(1)`. + /// - `O(S)`. + /// - S is the number of seconds a proposal already has. /// - One DB entry. /// # #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] @@ -676,16 +596,17 @@ decl_module! { /// - `vote`: The vote configuration. /// /// # - /// - `O(1)`. + /// - `O(R)`. + /// - R is the number of referendums the voter has voted on. /// - One DB change, one DB entry. 
/// # #[weight = SimpleDispatchInfo::FixedNormal(200_000)] fn vote(origin, #[compact] ref_index: ReferendumIndex, - vote: Vote + vote: AccountVote>, ) -> DispatchResult { let who = ensure_signed(origin)?; - Self::do_vote(who, ref_index, vote) + Self::try_vote(&who, ref_index, vote) } /// Vote in a referendum on behalf of a stash. If `vote.is_aye()`, the vote is to enact @@ -703,11 +624,11 @@ decl_module! { #[weight = SimpleDispatchInfo::FixedNormal(200_000)] fn proxy_vote(origin, #[compact] ref_index: ReferendumIndex, - vote: Vote + vote: AccountVote>, ) -> DispatchResult { let who = ensure_signed(origin)?; let voter = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; - Self::do_vote(voter, ref_index, vote) + Self::try_vote(&voter, ref_index, vote) } /// Schedule an emergency cancellation of a referendum. Cannot happen twice to the same @@ -718,18 +639,18 @@ decl_module! { /// -`ref_index`: The index of the referendum to cancel. /// /// # - /// - Depends on size of storage vec `VotersFor` for this referendum. + /// - `O(1)`. /// # #[weight = SimpleDispatchInfo::FixedOperational(500_000)] fn emergency_cancel(origin, ref_index: ReferendumIndex) { T::CancellationOrigin::ensure_origin(origin)?; - let info = Self::referendum_info(ref_index).ok_or(Error::::BadIndex)?; - let h = info.proposal_hash; + let status = Self::referendum_status(ref_index)?; + let h = status.proposal_hash; ensure!(!>::contains_key(h), Error::::AlreadyCanceled); >::insert(h, true); - Self::clear_referendum(ref_index); + Self::internal_cancel_referendum(ref_index); } /// Schedule a referendum to be tabled once it is legal to schedule an external @@ -804,7 +725,7 @@ decl_module! { /// /// - `proposal_hash`: The hash of the current external proposal. /// - `voting_period`: The period that is allowed for voting on this proposal. Increased to - /// `EmergencyVotingPeriod` if too low. + /// `FastTrackVotingPeriod` if too low. /// - `delay`: The number of block after voting has ended in approval and this should be /// enacted. This doesn't have a minimum amount. /// @@ -819,10 +740,27 @@ decl_module! { fn fast_track(origin, proposal_hash: T::Hash, voting_period: T::BlockNumber, - delay: T::BlockNumber + delay: T::BlockNumber, ) { - T::FastTrackOrigin::ensure_origin(origin)?; - let (e_proposal_hash, threshold) = >::get().ok_or(Error::::ProposalMissing)?; + // Rather complicated bit of code to ensure that either: + // - `voting_period` is at least `FastTrackVotingPeriod` and `origin` is `FastTrackOrigin`; or + // - `InstantAllowed` is `true` and `origin` is `InstantOrigin`. + let maybe_ensure_instant = if voting_period < T::FastTrackVotingPeriod::get() { + Some(origin) + } else { + if let Err(origin) = T::FastTrackOrigin::try_origin(origin) { + Some(origin) + } else { + None + } + }; + if let Some(ensure_instant) = maybe_ensure_instant { + T::InstantOrigin::ensure_origin(ensure_instant)?; + ensure!(T::InstantAllowed::get(), Error::::InstantNotAllowed); + } + + let (e_proposal_hash, threshold) = >::get() + .ok_or(Error::::ProposalMissing)?; ensure!( threshold != VoteThreshold::SuperMajorityApprove, Error::::NotSimpleMajority, @@ -831,9 +769,7 @@ decl_module! { >::kill(); let now = >::block_number(); - // We don't consider it an error if `vote_period` is too low, like `emergency_propose`. 
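+			// In short, the new origin check above means: if `voting_period` is at
+			// least `FastTrackVotingPeriod`, `FastTrackOrigin` is sufficient; if it
+			// is shorter, the call must come from `InstantOrigin` and
+			// `InstantAllowed` must be `true`, otherwise it fails with
+			// `InstantNotAllowed` (or `BadOrigin` if the origin is wrong).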
- let period = voting_period.max(T::EmergencyVotingPeriod::get()); - Self::inject_referendum(now + period, proposal_hash, threshold, delay); + Self::inject_referendum(now + voting_period, proposal_hash, threshold, delay); } /// Veto and blacklist the external proposal hash. @@ -849,6 +785,7 @@ decl_module! { /// - One DB clear. /// - Performs a binary search on `existing_vetoers` which should not /// be very large. + /// - O(log v), v is number of `existing_vetoers` /// # #[weight = SimpleDispatchInfo::FixedNormal(200_000)] fn veto_external(origin, proposal_hash: T::Hash) { @@ -886,7 +823,7 @@ decl_module! { #[weight = SimpleDispatchInfo::FixedOperational(10_000)] fn cancel_referendum(origin, #[compact] ref_index: ReferendumIndex) { ensure_root(origin)?; - Self::clear_referendum(ref_index); + Self::internal_cancel_referendum(ref_index); } /// Cancel a proposal queued for enactment. @@ -897,21 +834,21 @@ decl_module! { /// /// # /// - One DB change. + /// - O(d) where d is the items in the dispatch queue. /// # #[weight = SimpleDispatchInfo::FixedOperational(10_000)] fn cancel_queued(origin, which: ReferendumIndex) { ensure_root(origin)?; - let mut items = >::get(); - let original_len = items.len(); - items.retain(|i| i.2 != which); - ensure!(items.len() < original_len, Error::::ProposalMissing); - >::put(items); + T::Scheduler::cancel_named((DEMOCRACY_ID, which)) + .map_err(|_| Error::::ProposalMissing)?; } - fn on_initialize(n: T::BlockNumber) { + fn on_initialize(n: T::BlockNumber) -> Weight { if let Err(e) = Self::begin_block(n) { sp_runtime::print(e); } + + SimpleDispatchInfo::default().weigh_data(()) } /// Specify a proxy that is already open to us. Called by the stash. @@ -985,43 +922,39 @@ decl_module! { })?; } - /// Delegate vote. + /// Delegate the voting power (with some given conviction) of the sending account. /// - /// Currency is locked indefinitely for as long as it's delegated. + /// The balance delegated is locked for as long as it's delegated, and thereafter for the + /// time appropriate for the conviction's lock period. /// - /// The dispatch origin of this call must be _Signed_. + /// The dispatch origin of this call must be _Signed_, and the signing account must either: + /// - be delegating already; or + /// - have no voting activity (if there is, then it will need to be removed/consolidated + /// through `reap_vote` or `unvote`). /// - /// - `to`: The account to make a delegate of the sender. - /// - `conviction`: The conviction that will be attached to the delegated - /// votes. + /// - `to`: The account whose voting the `target` account's voting power will follow. + /// - `conviction`: The conviction that will be attached to the delegated votes. When the + /// account is undelegated, the funds will be locked for the corresponding period. + /// - `balance`: The amount of the account's balance to be used in delegating. This must + /// not be more than the account's current balance. /// /// Emits `Delegated`. /// /// # - /// - One extra DB entry. /// # #[weight = SimpleDispatchInfo::FixedNormal(500_000)] - pub fn delegate(origin, to: T::AccountId, conviction: Conviction) { + pub fn delegate(origin, to: T::AccountId, conviction: Conviction, balance: BalanceOf) { let who = ensure_signed(origin)?; - >::insert(&who, (&to, conviction)); - // Currency is locked indefinitely as long as it's delegated. 
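+			// Per the new docs above, `delegate` now takes an explicit `balance` and
+			// conviction: only the delegated `balance` is locked, and after
+			// `undelegate` the lock persists for the conviction's lock period rather
+			// than indefinitely.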
- T::Currency::extend_lock( - DEMOCRACY_ID, - &who, - Bounded::max_value(), - WithdrawReason::Transfer.into() - ); - Locks::::remove(&who); - Self::deposit_event(RawEvent::Delegated(who, to)); + Self::try_delegate(who, to, conviction, balance)?; } - /// Undelegate vote. + /// Undelegate the voting power of the sending account. /// - /// Must be sent from an account that has called delegate previously. - /// The tokens will be reduced from an indefinite lock to the maximum - /// possible according to the conviction of the prior delegation. + /// Tokens may be unlocked following once an amount of time consistent with the lock period + /// of the conviction with which the delegation was issued. /// - /// The dispatch origin of this call must be _Signed_. + /// The dispatch origin of this call must be _Signed_ and the signing account must be + /// currently delegating. /// /// Emits `Undelegated`. /// @@ -1031,19 +964,7 @@ decl_module! { #[weight = SimpleDispatchInfo::FixedNormal(500_000)] fn undelegate(origin) { let who = ensure_signed(origin)?; - ensure!(>::contains_key(&who), Error::::NotDelegated); - let (_, conviction) = >::take(&who); - // Indefinite lock is reduced to the maximum voting lock that could be possible. - let now = >::block_number(); - let locked_until = now + T::EnactmentPeriod::get() * conviction.lock_periods().into(); - Locks::::insert(&who, locked_until); - T::Currency::set_lock( - DEMOCRACY_ID, - &who, - Bounded::max_value(), - WithdrawReason::Transfer.into(), - ); - Self::deposit_event(RawEvent::Undelegated(who)); + Self::try_undelegate(who)?; } /// Clears all public proposals. @@ -1085,7 +1006,14 @@ decl_module! { T::Currency::reserve(&who, deposit)?; let now = >::block_number(); - >::insert(proposal_hash, (encoded_proposal, who.clone(), deposit, now)); + let a = PreimageStatus::Available { + data: encoded_proposal, + provider: who.clone(), + deposit, + since: now, + expiry: None, + }; + >::insert(proposal_hash, a); Self::deposit_event(RawEvent::PreimageNoted(proposal_hash, who, deposit)); } @@ -1100,19 +1028,25 @@ decl_module! { /// Emits `PreimageNoted`. /// /// # - /// - Dependent on the size of `encoded_proposal`. + /// - Dependent on the size of `encoded_proposal` and length of dispatch queue. /// # #[weight = SimpleDispatchInfo::FixedNormal(100_000)] fn note_imminent_preimage(origin, encoded_proposal: Vec) { let who = ensure_signed(origin)?; let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - ensure!(!>::contains_key(&proposal_hash), Error::::DuplicatePreimage); - let queue = >::get(); - ensure!(queue.iter().any(|item| &item.1 == &proposal_hash), Error::::NotImminent); + let status = Preimages::::get(&proposal_hash).ok_or(Error::::NotImminent)?; + let expiry = status.to_missing_expiry().ok_or(Error::::DuplicatePreimage)?; let now = >::block_number(); let free = >::zero(); - >::insert(proposal_hash, (encoded_proposal, who.clone(), free, now)); + let a = PreimageStatus::Available { + data: encoded_proposal, + provider: who.clone(), + deposit: Zero::zero(), + since: now, + expiry: Some(expiry), + }; + >::insert(proposal_hash, a); Self::deposit_event(RawEvent::PreimageNoted(proposal_hash, who, free)); } @@ -1135,20 +1069,22 @@ decl_module! { #[weight = SimpleDispatchInfo::FixedNormal(10_000)] fn reap_preimage(origin, proposal_hash: T::Hash) { let who = ensure_signed(origin)?; + let (provider, deposit, since, expiry) = >::get(&proposal_hash) + .and_then(|m| match m { + PreimageStatus::Available { provider, deposit, since, expiry, .. 
} + => Some((provider, deposit, since, expiry)), + _ => None, + }).ok_or(Error::::PreimageMissing)?; - let (_, old, deposit, then) = >::get(&proposal_hash) - .ok_or(Error::::PreimageMissing)?; let now = >::block_number(); let (voting, enactment) = (T::VotingPeriod::get(), T::EnactmentPeriod::get()); - let additional = if who == old { Zero::zero() } else { enactment }; - ensure!(now >= then + voting + additional, Error::::Early); + let additional = if who == provider { Zero::zero() } else { enactment }; + ensure!(now >= since + voting + additional, Error::::TooEarly); + ensure!(expiry.map_or(true, |e| now > e), Error::::Imminent); - let queue = >::get(); - ensure!(!queue.iter().any(|item| &item.1 == &proposal_hash), Error::::Imminent); - - let _ = T::Currency::repatriate_reserved(&old, &who, deposit, BalanceStatus::Free); + let _ = T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); >::remove(&proposal_hash); - Self::deposit_event(RawEvent::PreimageReaped(proposal_hash, old, deposit, who)); + Self::deposit_event(RawEvent::PreimageReaped(proposal_hash, provider, deposit, who)); } /// Unlock tokens that have an expired lock. @@ -1157,21 +1093,13 @@ decl_module! { /// /// - `target`: The account to remove the lock on. /// - /// Emits `Unlocked`. - /// /// # /// - `O(1)`. /// # #[weight = SimpleDispatchInfo::FixedNormal(10_000)] fn unlock(origin, target: T::AccountId) { ensure_signed(origin)?; - - let expiry = Locks::::get(&target).ok_or(Error::::NotLocked)?; - ensure!(expiry <= system::Module::::block_number(), Error::::NotExpired); - - T::Currency::remove_lock(DEMOCRACY_ID, &target); - Locks::::remove(&target); - Self::deposit_event(RawEvent::Unlocked(target)); + Self::update_lock(&target); } /// Become a proxy. @@ -1197,119 +1125,192 @@ decl_module! { *a = Some(ProxyState::Open(target)); }); } + + /// Remove a vote for a referendum. + /// + /// If: + /// - the referendum was cancelled, or + /// - the referendum is ongoing, or + /// - the referendum has ended such that + /// - the vote of the account was in opposition to the result; or + /// - there was no conviction to the account's vote; or + /// - the account made a split vote + /// ...then the vote is removed cleanly and a following call to `unlock` may result in more + /// funds being available. + /// + /// If, however, the referendum has ended and: + /// - it finished corresponding to the vote of the account, and + /// - the account made a standard vote with conviction, and + /// - the lock period of the conviction is not over + /// ...then the lock will be aggregated into the overall account's lock, which may involve + /// *overlocking* (where the two locks are combined into a single lock that is the maximum + /// of both the amount locked and the time is it locked for). + /// + /// The dispatch origin of this call must be _Signed_, and the signer must have a vote + /// registered for referendum `index`. + /// + /// - `index`: The index of referendum of the vote to be removed. + /// + /// # + /// - `O(R + log R)` where R is the number of referenda that `target` has voted on. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(10_000)] + fn remove_vote(origin, index: ReferendumIndex) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::try_remove_vote(&who, index, UnvoteScope::Any) + } + + /// Remove a vote for a referendum. + /// + /// If the `target` is equal to the signer, then this function is exactly equivalent to + /// `remove_vote`. 
If not equal to the signer, then the vote must have expired, + /// either because the referendum was cancelled, because the voter lost the referendum or + /// because the conviction period is over. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `target`: The account of the vote to be removed; this account must have voted for + /// referendum `index`. + /// - `index`: The index of referendum of the vote to be removed. + /// + /// # + /// - `O(R + log R)` where R is the number of referenda that `target` has voted on. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(10_000)] + fn remove_other_vote(origin, target: T::AccountId, index: ReferendumIndex) -> DispatchResult { + let who = ensure_signed(origin)?; + let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired }; + Self::try_remove_vote(&target, index, scope)?; + Ok(()) + } + + /// Delegate the voting power (with some given conviction) of a proxied account. + /// + /// The balance delegated is locked for as long as it's delegated, and thereafter for the + /// time appropriate for the conviction's lock period. + /// + /// The dispatch origin of this call must be _Signed_, and the signing account must have + /// been set as the proxy account for `target`. + /// + /// - `target`: The account whole voting power shall be delegated and whose balance locked. + /// This account must either: + /// - be delegating already; or + /// - have no voting activity (if there is, then it will need to be removed/consolidated + /// through `reap_vote` or `unvote`). + /// - `to`: The account whose voting the `target` account's voting power will follow. + /// - `conviction`: The conviction that will be attached to the delegated votes. When the + /// account is undelegated, the funds will be locked for the corresponding period. + /// - `balance`: The amount of the account's balance to be used in delegating. This must + /// not be more than the account's current balance. + /// + /// Emits `Delegated`. + /// + /// # + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000)] + pub fn proxy_delegate(origin, + to: T::AccountId, + conviction: Conviction, + balance: BalanceOf, + ) { + let who = ensure_signed(origin)?; + let target = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; + Self::try_delegate(target, to, conviction, balance)?; + } + + /// Undelegate the voting power of a proxied account. + /// + /// Tokens may be unlocked following once an amount of time consistent with the lock period + /// of the conviction with which the delegation was issued. + /// + /// The dispatch origin of this call must be _Signed_ and the signing account must be a + /// proxy for some other account which is currently delegating. + /// + /// Emits `Undelegated`. + /// + /// # + /// - O(1). + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000)] + fn proxy_undelegate(origin) { + let who = ensure_signed(origin)?; + let target = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; + Self::try_undelegate(target)?; + } + + /// Remove a proxied vote for a referendum. + /// + /// Exactly equivalent to `remove_vote` except that it operates on the account that the + /// sender is a proxy for. + /// + /// The dispatch origin of this call must be _Signed_ and the signing account must be a + /// proxy for some other account which has a registered vote for the referendum of `index`. + /// + /// - `index`: The index of referendum of the vote to be removed. 
+ /// + /// # + /// - `O(R + log R)` where R is the number of referenda that `target` has voted on. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(10_000)] + fn proxy_remove_vote(origin, index: ReferendumIndex) -> DispatchResult { + let who = ensure_signed(origin)?; + let target = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; + Self::try_remove_vote(&target, index, UnvoteScope::Any) + } + + /// Enact a proposal from a referendum. For now we just make the weight be the maximum. + #[weight = SimpleDispatchInfo::MaxNormal] + fn enact_proposal(origin, proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { + ensure_root(origin)?; + Self::do_enact_proposal(proposal_hash, index) + } } } impl Module { + fn migrate() { + use frame_support::{Twox64Concat, migration::{StorageKeyIterator, remove_storage_prefix}}; + remove_storage_prefix(b"Democracy", b"VotesOf", &[]); + remove_storage_prefix(b"Democracy", b"VotersFor", &[]); + remove_storage_prefix(b"Democracy", b"Delegations", &[]); + for (who, (end, proposal_hash, threshold, delay)) + in StorageKeyIterator::< + ReferendumIndex, + (T::BlockNumber, T::Hash, VoteThreshold, T::BlockNumber), + Twox64Concat, + >::new(b"Democracy", b"ReferendumInfoOf").drain() + { + let status = ReferendumStatus { + end, proposal_hash, threshold, delay, tally: Tally::default() + }; + ReferendumInfoOf::::insert(who, ReferendumInfo::Ongoing(status)) + } + } + // exposed immutables. /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal /// index. - pub fn locked_for(proposal: PropIndex) -> Option> { + pub fn backing_for(proposal: PropIndex) -> Option> { Self::deposit_of(proposal).map(|(d, l)| d * (l.len() as u32).into()) } - /// Return true if `ref_index` is an on-going referendum. - pub fn is_active_referendum(ref_index: ReferendumIndex) -> bool { - >::contains_key(ref_index) - } - - /// Get all referenda currently active. - pub fn active_referenda() - -> Vec<(ReferendumIndex, ReferendumInfo)> - { - let next = Self::lowest_unbaked(); - let last = Self::referendum_count(); - (next..last).into_iter() - .filter_map(|i| Self::referendum_info(i).map(|info| (i, info))) - .collect() - } - /// Get all referenda ready for tally at block `n`. pub fn maturing_referenda_at( n: T::BlockNumber - ) -> Vec<(ReferendumIndex, ReferendumInfo)> { + ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { let next = Self::lowest_unbaked(); let last = Self::referendum_count(); (next..last).into_iter() - .filter_map(|i| Self::referendum_info(i).map(|info| (i, info))) - .filter(|&(_, ref info)| info.end == n) + .map(|i| (i, Self::referendum_info(i))) + .filter_map(|(i, maybe_info)| match maybe_info { + Some(ReferendumInfo::Ongoing(status)) => Some((i, status)), + _ => None, + }) + .filter(|(_, status)| status.end == n) .collect() } - /// Get the voters for the current proposal. 
- pub fn tally(ref_index: ReferendumIndex) -> (BalanceOf, BalanceOf, BalanceOf) { - let (approve, against, capital): - (BalanceOf, BalanceOf, BalanceOf) = Self::voters_for(ref_index) - .iter() - .map(|voter| ( - T::Currency::total_balance(voter), Self::vote_of((ref_index, voter.clone())) - )) - .map(|(balance, Vote { aye, conviction })| { - let (votes, turnout) = conviction.votes(balance); - if aye { - (votes, Zero::zero(), turnout) - } else { - (Zero::zero(), votes, turnout) - } - }).fold( - (Zero::zero(), Zero::zero(), Zero::zero()), - |(a, b, c), (d, e, f)| (a + d, b + e, c + f) - ); - let (del_approve, del_against, del_capital) = Self::tally_delegation(ref_index); - (approve + del_approve, against + del_against, capital + del_capital) - } - - /// Get the delegated voters for the current proposal. - /// I think this goes into a worker once https://github.com/paritytech/substrate/issues/1458 is - /// done. - fn tally_delegation(ref_index: ReferendumIndex) -> (BalanceOf, BalanceOf, BalanceOf) { - Self::voters_for(ref_index).iter().fold( - (Zero::zero(), Zero::zero(), Zero::zero()), - |(approve_acc, against_acc, turnout_acc), voter| { - let Vote { aye, conviction } = Self::vote_of((ref_index, voter.clone())); - let (votes, turnout) = Self::delegated_votes( - ref_index, - voter.clone(), - conviction, - MAX_RECURSION_LIMIT - ); - if aye { - (approve_acc + votes, against_acc, turnout_acc + turnout) - } else { - (approve_acc, against_acc + votes, turnout_acc + turnout) - } - } - ) - } - - fn delegated_votes( - ref_index: ReferendumIndex, - to: T::AccountId, - parent_conviction: Conviction, - recursion_limit: u32, - ) -> (BalanceOf, BalanceOf) { - if recursion_limit == 0 { return (Zero::zero(), Zero::zero()); } - >::enumerate() - .filter(|(delegator, (delegate, _))| - *delegate == to && !>::contains_key(&(ref_index, delegator.clone())) - ).fold( - (Zero::zero(), Zero::zero()), - |(votes_acc, turnout_acc), (delegator, (_delegate, max_conviction))| { - let conviction = Conviction::min(parent_conviction, max_conviction); - let balance = T::Currency::total_balance(&delegator); - let (votes, turnout) = conviction.votes(balance); - let (del_votes, del_turnout) = Self::delegated_votes( - ref_index, - delegator, - conviction, - recursion_limit - 1 - ); - (votes_acc + votes + del_votes, turnout_acc + turnout + del_turnout) - } - ) - } - // Exposed mutables. #[cfg(feature = "std")] @@ -1339,21 +1340,236 @@ impl Module { /// Remove a referendum. pub fn internal_cancel_referendum(ref_index: ReferendumIndex) { Self::deposit_event(RawEvent::Cancelled(ref_index)); - >::clear_referendum(ref_index); + ReferendumInfoOf::::remove(ref_index); } // private. - /// Actually enact a vote, if legit. - fn do_vote(who: T::AccountId, ref_index: ReferendumIndex, vote: Vote) -> DispatchResult { - ensure!(Self::is_active_referendum(ref_index), Error::::ReferendumInvalid); - if !>::contains_key((ref_index, &who)) { - >::append_or_insert(ref_index, &[&who][..]); + /// Ok if the given referendum is active, Err otherwise + fn ensure_ongoing(r: ReferendumInfo>) + -> Result>, DispatchError> + { + match r { + ReferendumInfo::Ongoing(s) => Ok(s), + _ => Err(Error::::ReferendumInvalid.into()), } - >::insert((ref_index, &who), vote); + } + + fn referendum_status(ref_index: ReferendumIndex) + -> Result>, DispatchError> + { + let info = ReferendumInfoOf::::get(ref_index) + .ok_or(Error::::ReferendumInvalid)?; + Self::ensure_ongoing(info) + } + + /// Actually enact a vote, if legit. 
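Before the `try_vote` body that follows, a minimal sketch (simplified, hypothetical types, not the pallet's actual generics) of the bookkeeping it performs on a voter's `Voting::Direct` record: direct votes are held in a `Vec` sorted by referendum index, so `binary_search_by_key` either finds an existing entry to overwrite or yields the insertion point for a first vote. The real `try_vote` additionally adjusts the running tally and extends the currency lock; those parts are omitted here.

type RefIndex = u32;

#[derive(Clone, Copy, PartialEq, Debug)]
struct ToyVote { aye: bool, balance: u64 }

/// Insert or replace a vote while keeping the list sorted by referendum index.
fn record_vote(votes: &mut Vec<(RefIndex, ToyVote)>, index: RefIndex, vote: ToyVote) {
    match votes.binary_search_by_key(&index, |&(i, _)| i) {
        // re-voting on the same referendum overwrites the previous entry
        Ok(pos) => votes[pos].1 = vote,
        // a first vote lands at its sorted position
        Err(pos) => votes.insert(pos, (index, vote)),
    }
}

fn main() {
    let mut votes = Vec::new();
    record_vote(&mut votes, 3, ToyVote { aye: true, balance: 10 });
    record_vote(&mut votes, 1, ToyVote { aye: false, balance: 5 });
    record_vote(&mut votes, 3, ToyVote { aye: false, balance: 10 }); // overwrite
    assert_eq!(votes, vec![
        (1, ToyVote { aye: false, balance: 5 }),
        (3, ToyVote { aye: false, balance: 10 }),
    ]);
}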
+ fn try_vote(who: &T::AccountId, ref_index: ReferendumIndex, vote: AccountVote>) -> DispatchResult { + let mut status = Self::referendum_status(ref_index)?; + ensure!(vote.balance() <= T::Currency::free_balance(who), Error::::InsufficientFunds); + VotingOf::::try_mutate(who, |voting| -> DispatchResult { + if let Voting::Direct { ref mut votes, delegations, .. } = voting { + match votes.binary_search_by_key(&ref_index, |i| i.0) { + Ok(i) => { + // Shouldn't be possible to fail, but we handle it gracefully. + status.tally.remove(votes[i].1).ok_or(Error::::Underflow)?; + if let Some(approve) = votes[i].1.as_standard() { + status.tally.reduce(approve, *delegations); + } + votes[i].1 = vote; + } + Err(i) => votes.insert(i, (ref_index, vote)), + } + // Shouldn't be possible to fail, but we handle it gracefully. + status.tally.add(vote).ok_or(Error::::Overflow)?; + if let Some(approve) = vote.as_standard() { + status.tally.increase(approve, *delegations); + } + Ok(()) + } else { + Err(Error::::AlreadyDelegating.into()) + } + })?; + // Extend the lock to `balance` (rather than setting it) since we don't know what other + // votes are in place. + T::Currency::extend_lock( + DEMOCRACY_ID, + who, + vote.balance(), + WithdrawReason::Transfer.into() + ); + ReferendumInfoOf::::insert(ref_index, ReferendumInfo::Ongoing(status)); + Ok(()) + } + + /// Remove the account's vote for the given referendum if possible. This is possible when: + /// - The referendum has not finished. + /// - The referendum has finished and the voter lost their direction. + /// - The referendum has finished and the voter's lock period is up. + /// + /// This will generally be combined with a call to `unlock`. + fn try_remove_vote(who: &T::AccountId, ref_index: ReferendumIndex, scope: UnvoteScope) -> DispatchResult { + let info = ReferendumInfoOf::::get(ref_index); + VotingOf::::try_mutate(who, |voting| -> DispatchResult { + if let Voting::Direct { ref mut votes, delegations, ref mut prior } = voting { + let i = votes.binary_search_by_key(&ref_index, |i| i.0).map_err(|_| Error::::NotVoter)?; + match info { + Some(ReferendumInfo::Ongoing(mut status)) => { + ensure!(matches!(scope, UnvoteScope::Any), Error::::NoPermission); + // Shouldn't be possible to fail, but we handle it gracefully. + status.tally.remove(votes[i].1).ok_or(Error::::Underflow)?; + if let Some(approve) = votes[i].1.as_standard() { + status.tally.reduce(approve, *delegations); + } + ReferendumInfoOf::::insert(ref_index, ReferendumInfo::Ongoing(status)); + } + Some(ReferendumInfo::Finished{end, approved}) => + if let Some((lock_periods, balance)) = votes[i].1.locked_if(approved) { + let unlock_at = end + T::EnactmentPeriod::get() * lock_periods.into(); + let now = system::Module::::block_number(); + if now < unlock_at { + ensure!(matches!(scope, UnvoteScope::Any), Error::::NoPermission); + prior.accumulate(unlock_at, balance) + } + }, + None => {} // Referendum was cancelled. + } + votes.remove(i); + } + Ok(()) + })?; + Ok(()) + } + + fn increase_upstream_delegation(who: &T::AccountId, amount: Delegations>) { + VotingOf::::mutate(who, |voting| match voting { + Voting::Delegating { delegations, .. } => + // We don't support second level delegating, so we don't need to do anything more. + *delegations = delegations.saturating_add(amount), + Voting::Direct { votes, delegations, .. } => { + *delegations = delegations.saturating_add(amount); + for &(ref_index, account_vote) in votes.iter() { + if let AccountVote::Standard { vote, .. 
} = account_vote { + ReferendumInfoOf::::mutate(ref_index, |maybe_info| + if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { + status.tally.increase(vote.aye, amount); + } + ); + } + } + } + }) + } + + fn reduce_upstream_delegation(who: &T::AccountId, amount: Delegations>) { + VotingOf::::mutate(who, |voting| match voting { + Voting::Delegating { delegations, .. } => + // We don't support second level delegating, so we don't need to do anything more. + *delegations = delegations.saturating_sub(amount), + Voting::Direct { votes, delegations, .. } => { + *delegations = delegations.saturating_sub(amount); + for &(ref_index, account_vote) in votes.iter() { + if let AccountVote::Standard { vote, .. } = account_vote { + ReferendumInfoOf::::mutate(ref_index, |maybe_info| + if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { + status.tally.reduce(vote.aye, amount); + } + ); + } + } + } + }) + } + + /// Attempt to delegate `balance` times `conviction` of voting power from `who` to `target`. + fn try_delegate( + who: T::AccountId, + target: T::AccountId, + conviction: Conviction, + balance: BalanceOf, + ) -> DispatchResult { + ensure!(who != target, Error::::Nonsense); + ensure!(balance <= T::Currency::free_balance(&who), Error::::InsufficientFunds); + VotingOf::::try_mutate(&who, |voting| -> DispatchResult { + let mut old = Voting::Delegating { + balance, + target: target.clone(), + conviction, + delegations: Default::default(), + prior: Default::default(), + }; + sp_std::mem::swap(&mut old, voting); + match old { + Voting::Delegating { balance, target, conviction, delegations, prior, .. } => { + // remove any delegation votes to our current target. + Self::reduce_upstream_delegation(&target, conviction.votes(balance)); + voting.set_common(delegations, prior); + } + Voting::Direct { votes, delegations, prior } => { + // here we just ensure that we're currently idling with no votes recorded. + ensure!(votes.is_empty(), Error::::VotesExist); + voting.set_common(delegations, prior); + } + } + Self::increase_upstream_delegation(&target, conviction.votes(balance)); + // Extend the lock to `balance` (rather than setting it) since we don't know what other + // votes are in place. + T::Currency::extend_lock( + DEMOCRACY_ID, + &who, + balance, + WithdrawReason::Transfer.into() + ); + Ok(()) + })?; + Self::deposit_event(Event::::Delegated(who, target)); + Ok(()) + } + + /// Attempt to end the current delegation. + fn try_undelegate(who: T::AccountId) -> DispatchResult { + VotingOf::::try_mutate(&who, |voting| -> DispatchResult { + let mut old = Voting::default(); + sp_std::mem::swap(&mut old, voting); + match old { + Voting::Delegating { + balance, + target, + conviction, + delegations, + mut prior, + } => { + // remove any delegation votes to our current target. + Self::reduce_upstream_delegation(&target, conviction.votes(balance)); + let now = system::Module::::block_number(); + let lock_periods = conviction.lock_periods().into(); + prior.accumulate(now + T::EnactmentPeriod::get() * lock_periods, balance); + voting.set_common(delegations, prior); + } + Voting::Direct { .. } => { + return Err(Error::::NotDelegating.into()) + } + } + Ok(()) + })?; + Self::deposit_event(Event::::Undelegated(who)); Ok(()) } + /// Rejig the lock on an account. It will never get more stringent (since that would indicate + /// a security hole) but may be reduced from what they are currently. 
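A hedged sketch of the shape of the `update_lock` helper that follows (toy types; the real `Voting::rejig` and `locked_balance` are more involved, and the assumption here that the requirement is simply the larger of the prior lock and the live-vote commitment is mine): expired prior locks are pruned, the remaining required amount is computed, and the currency lock is then either removed or set to exactly that amount, so from this path the lock can only shrink.

struct PriorLock { unlock_at: u64, amount: u64 }

struct VotingRecord {
    prior: Option<PriorLock>,
    // balance still committed behind live direct votes (simplified to one number)
    direct_votes_locked: u64,
}

impl VotingRecord {
    /// Drop the prior lock once its unlock block has passed ("rejig").
    fn rejig(&mut self, now: u64) {
        let expired = self.prior.as_ref().map_or(false, |p| p.unlock_at <= now);
        if expired { self.prior = None; }
    }
    /// The largest amount that still needs to stay locked (simplified assumption).
    fn locked_balance(&self) -> u64 {
        let prior = self.prior.as_ref().map_or(0, |p| p.amount);
        prior.max(self.direct_votes_locked)
    }
}

/// `None` stands for `remove_lock`, `Some(x)` for `set_lock(x)`.
fn update_lock(record: &mut VotingRecord, now: u64) -> Option<u64> {
    record.rejig(now);
    let needed = record.locked_balance();
    if needed == 0 { None } else { Some(needed) }
}

fn main() {
    let mut rec = VotingRecord {
        prior: Some(PriorLock { unlock_at: 10, amount: 7 }),
        direct_votes_locked: 3,
    };
    assert_eq!(update_lock(&mut rec, 5), Some(7));  // prior lock still active
    assert_eq!(update_lock(&mut rec, 10), Some(3)); // prior expired, only live votes remain
}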
+ fn update_lock(who: &T::AccountId) { + let lock_needed = VotingOf::::mutate(who, |voting| { + voting.rejig(system::Module::::block_number()); + voting.locked_balance() + }); + if lock_needed.is_zero() { + T::Currency::remove_lock(DEMOCRACY_ID, who); + } else { + T::Currency::set_lock(DEMOCRACY_ID, who, lock_needed, WithdrawReason::Transfer.into()); + } + } + /// Start a referendum fn inject_referendum( end: T::BlockNumber, @@ -1363,51 +1579,13 @@ impl Module { ) -> ReferendumIndex { let ref_index = Self::referendum_count(); ReferendumCount::put(ref_index + 1); - let item = ReferendumInfo { end, proposal_hash, threshold, delay }; + let status = ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; + let item = ReferendumInfo::Ongoing(status); >::insert(ref_index, item); Self::deposit_event(RawEvent::Started(ref_index, threshold)); ref_index } - /// Remove all info on a referendum. - fn clear_referendum(ref_index: ReferendumIndex) { - >::remove(ref_index); - - LowestUnbaked::mutate(|i| if *i == ref_index { - *i += 1; - let end = ReferendumCount::get(); - while !Self::is_active_referendum(*i) && *i < end { - *i += 1; - } - }); - >::remove(ref_index); - for v in Self::voters_for(ref_index) { - >::remove((ref_index, v)); - } - } - - /// Enact a proposal from a referendum. - fn enact_proposal(proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { - if let Some((encoded_proposal, who, amount, _)) = >::take(&proposal_hash) { - if let Ok(proposal) = T::Proposal::decode(&mut &encoded_proposal[..]) { - let _ = T::Currency::unreserve(&who, amount); - Self::deposit_event(RawEvent::PreimageUsed(proposal_hash, who, amount)); - - let ok = proposal.dispatch(frame_system::RawOrigin::Root.into()).is_ok(); - Self::deposit_event(RawEvent::Executed(index, ok)); - - Ok(()) - } else { - T::Slash::on_unbalanced(T::Currency::slash_reserved(&who, amount).0); - Self::deposit_event(RawEvent::PreimageInvalid(proposal_hash, index)); - Err(Error::::PreimageInvalid.into()) - } - } else { - Self::deposit_event(RawEvent::PreimageMissing(proposal_hash, index)); - Err(Error::::PreimageMissing.into()) - } - } - /// Table the next waiting proposal for a vote. fn launch_next(now: T::BlockNumber) -> DispatchResult { if LastTabledWasExternal::take() { @@ -1439,7 +1617,7 @@ impl Module { let mut public_props = Self::public_props(); if let Some((winner_index, _)) = public_props.iter() .enumerate() - .max_by_key(|x| Self::locked_for((x.1).0).unwrap_or_else(Zero::zero) + .max_by_key(|x| Self::backing_for((x.1).0).unwrap_or_else(Zero::zero) /* ^^ defensive only: All current public proposals have an amount locked*/) { let (prop_index, proposal, _) = public_props.swap_remove(winner_index); @@ -1462,61 +1640,65 @@ impl Module { } else { Err(Error::::NoneWaiting)? } + } + + fn do_enact_proposal(proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { + let preimage = >::take(&proposal_hash); + if let Some(PreimageStatus::Available { data, provider, deposit, .. 
}) = preimage { + if let Ok(proposal) = T::Proposal::decode(&mut &data[..]) { + let _ = T::Currency::unreserve(&provider, deposit); + Self::deposit_event(RawEvent::PreimageUsed(proposal_hash, provider, deposit)); + let ok = proposal.dispatch(frame_system::RawOrigin::Root.into()).is_ok(); + Self::deposit_event(RawEvent::Executed(index, ok)); + + Ok(()) + } else { + T::Slash::on_unbalanced(T::Currency::slash_reserved(&provider, deposit).0); + Self::deposit_event(RawEvent::PreimageInvalid(proposal_hash, index)); + Err(Error::::PreimageInvalid.into()) + } + } else { + Self::deposit_event(RawEvent::PreimageMissing(proposal_hash, index)); + Err(Error::::PreimageMissing.into()) + } } fn bake_referendum( now: T::BlockNumber, index: ReferendumIndex, - info: ReferendumInfo - ) -> DispatchResult { - let (approve, against, capital) = Self::tally(index); + status: ReferendumStatus>, + ) -> Result { let total_issuance = T::Currency::total_issuance(); - let approved = info.threshold.approved(approve, against, capital, total_issuance); - let enactment_period = T::EnactmentPeriod::get(); - - // Logic defined in https://www.slideshare.net/gavofyork/governance-in-polkadot-poc3 - // Essentially, we extend the lock-period of the coins behind the winning votes to be the - // vote strength times the public delay period from now. - for (a, lock_periods) in Self::voters_for(index).into_iter() - .map(|a| (a.clone(), Self::vote_of((index, a)))) - // ^^^ defensive only: all items come from `voters`; for an item to be in `voters` - // there must be a vote registered; qed - .filter(|&(_, vote)| vote.aye == approved) // Just the winning coins - .map(|(a, vote)| (a, vote.conviction.lock_periods())) - .filter(|&(_, lock_periods)| !lock_periods.is_zero()) // Just the lock votes - { - // now plus: the base lock period multiplied by the number of periods this voter - // offered to lock should they win... - let locked_until = now + enactment_period * lock_periods.into(); - Locks::::insert(&a, locked_until); - // ...extend their bondage until at least then. - T::Currency::extend_lock( - DEMOCRACY_ID, - &a, - Bounded::max_value(), - WithdrawReason::Transfer.into() - ); - } - - Self::clear_referendum(index); + let approved = status.threshold.approved(status.tally, total_issuance); if approved { Self::deposit_event(RawEvent::Passed(index)); - if info.delay.is_zero() { - let _ = Self::enact_proposal(info.proposal_hash, index); + if status.delay.is_zero() { + let _ = Self::do_enact_proposal(status.proposal_hash, index); } else { - let item = (now + info.delay,info.proposal_hash, index); - >::mutate(|queue| { - let pos = queue.binary_search_by_key(&item.0, |x| x.0).unwrap_or_else(|e| e); - queue.insert(pos, item); + let when = now + status.delay; + // Note that we need the preimage now. + Preimages::::mutate_exists(&status.proposal_hash, |maybe_pre| match *maybe_pre { + Some(PreimageStatus::Available { ref mut expiry, .. }) => *expiry = Some(when), + ref mut a => *a = Some(PreimageStatus::Missing(when)), }); + + if T::Scheduler::schedule_named( + (DEMOCRACY_ID, index), + when, + None, + 63, + Call::enact_proposal(status.proposal_hash, index).into(), + ).is_err() { + frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); + } } } else { Self::deposit_event(RawEvent::NotPassed(index)); } - Ok(()) + Ok(approved) } /// Current era is ending; we should finish up any proposals. @@ -1530,1386 +1712,9 @@ impl Module { // tally up votes for any expiring referenda. 
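The loop below walks the maturing referenda; here is a compact sketch of that flow with toy types (a hypothetical `Info` enum, and a bare ayes-versus-nays majority standing in for the real `VoteThreshold::approved`): a matured referendum is tallied and its entry is rewritten as `Finished { end, approved }` rather than deleted, which is what later lets `try_remove_vote` decide whether a conviction lock still applies.

use std::collections::BTreeMap;

#[derive(Debug, PartialEq)]
enum Info {
    Ongoing { end: u64, ayes: u64, nays: u64 },
    Finished { end: u64, approved: bool },
}

/// Bake every referendum whose voting period ends at `now`.
fn begin_block(now: u64, referenda: &mut BTreeMap<u32, Info>) {
    let maturing: Vec<u32> = referenda.iter()
        .filter_map(|(i, info)| match info {
            Info::Ongoing { end, .. } if *end == now => Some(*i),
            _ => None,
        })
        .collect();
    for i in maturing {
        let approved = match referenda.get(&i) {
            // a bare majority stands in for the real VoteThreshold logic
            Some(Info::Ongoing { ayes, nays, .. }) => ayes > nays,
            _ => continue,
        };
        // the entry is kept (as Finished), not removed as the old code did
        referenda.insert(i, Info::Finished { end: now, approved });
    }
}

fn main() {
    let mut referenda = BTreeMap::new();
    referenda.insert(0, Info::Ongoing { end: 4, ayes: 10, nays: 3 });
    referenda.insert(1, Info::Ongoing { end: 5, ayes: 1, nays: 2 });
    begin_block(4, &mut referenda);
    assert_eq!(referenda[&0], Info::Finished { end: 4, approved: true });
    assert!(matches!(referenda[&1], Info::Ongoing { .. }));
}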
for (index, info) in Self::maturing_referenda_at(now).into_iter() { - Self::bake_referendum(now, index, info)?; - } - - let queue = >::get(); - let mut used = 0; - // It's stored in order, so the earliest will always be at the start. - for &(_, proposal_hash, index) in queue.iter().take_while(|x| x.0 == now) { - let _ = Self::enact_proposal(proposal_hash.clone(), index); - used += 1; - } - if used != 0 { - >::put(&queue[used..]); + let approved = Self::bake_referendum(now, index, info)?; + ReferendumInfoOf::::insert(index, ReferendumInfo::Finished { end: now, approved }); } Ok(()) } } - -#[cfg(test)] -mod tests { - use super::*; - use std::cell::RefCell; - use frame_support::{ - impl_outer_origin, impl_outer_dispatch, assert_noop, assert_ok, parameter_types, - ord_parameter_types, traits::Contains, weights::Weight, - }; - use sp_core::H256; - use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup, Bounded, BadOrigin}, - testing::Header, Perbill, - }; - use pallet_balances::{BalanceLock, Error as BalancesError}; - use frame_system::EnsureSignedBy; - - const AYE: Vote = Vote{ aye: true, conviction: Conviction::None }; - const NAY: Vote = Vote{ aye: false, conviction: Conviction::None }; - const BIG_AYE: Vote = Vote{ aye: true, conviction: Conviction::Locked1x }; - const BIG_NAY: Vote = Vote{ aye: false, conviction: Conviction::Locked1x }; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - pallet_balances::Balances, - democracy::Democracy, - } - } - - // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. - #[derive(Clone, Eq, PartialEq, Debug)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - parameter_types! { - pub const LaunchPeriod: u64 = 2; - pub const VotingPeriod: u64 = 2; - pub const EmergencyVotingPeriod: u64 = 1; - pub const MinimumDeposit: u64 = 1; - pub const EnactmentPeriod: u64 = 2; - pub const CooloffPeriod: u64 = 2; - } - ord_parameter_types! { - pub const One: u64 = 1; - pub const Two: u64 = 2; - pub const Three: u64 = 3; - pub const Four: u64 = 4; - pub const Five: u64 = 5; - } - pub struct OneToFive; - impl Contains for OneToFive { - fn sorted_members() -> Vec { - vec![1, 2, 3, 4, 5] - } - } - thread_local! 
{ - static PREIMAGE_BYTE_DEPOSIT: RefCell = RefCell::new(0); - } - pub struct PreimageByteDeposit; - impl Get for PreimageByteDeposit { - fn get() -> u64 { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow()) } - } - impl super::Trait for Test { - type Proposal = Call; - type Event = (); - type Currency = pallet_balances::Module; - type EnactmentPeriod = EnactmentPeriod; - type LaunchPeriod = LaunchPeriod; - type VotingPeriod = VotingPeriod; - type EmergencyVotingPeriod = EmergencyVotingPeriod; - type MinimumDeposit = MinimumDeposit; - type ExternalOrigin = EnsureSignedBy; - type ExternalMajorityOrigin = EnsureSignedBy; - type ExternalDefaultOrigin = EnsureSignedBy; - type FastTrackOrigin = EnsureSignedBy; - type CancellationOrigin = EnsureSignedBy; - type VetoOrigin = EnsureSignedBy; - type CooloffPeriod = CooloffPeriod; - type PreimageByteDeposit = PreimageByteDeposit; - type Slash = (); - } - - fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage(&mut t).unwrap(); - sp_io::TestExternalities::new(t) - } - - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Democracy = Module; - - #[test] - fn params_should_work() { - new_test_ext().execute_with(|| { - assert_eq!(Democracy::referendum_count(), 0); - assert_eq!(Balances::free_balance(42), 0); - assert_eq!(Balances::total_issuance(), 210); - }); - } - - fn set_balance_proposal(value: u64) -> Vec { - Call::Balances(pallet_balances::Call::set_balance(42, value, 0)).encode() - } - - fn set_balance_proposal_hash(value: u64) -> H256 { - BlakeTwo256::hash(&set_balance_proposal(value)[..]) - } - - fn set_balance_proposal_hash_and_note(value: u64) -> H256 { - let p = set_balance_proposal(value); - let h = BlakeTwo256::hash(&p[..]); - match Democracy::note_preimage(Origin::signed(6), p) { - Ok(_) => (), - Err(x) if x == Error::::DuplicatePreimage.into() => (), - Err(x) => panic!(x), - } - h - } - - fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose( - Origin::signed(who), - set_balance_proposal_hash(value), - delay - ) - } - - fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose( - Origin::signed(who), - set_balance_proposal_hash_and_note(value), - delay - ) - } - - fn next_block() { - System::set_block_number(System::block_number() + 1); - assert_eq!(Democracy::begin_block(System::block_number()), Ok(())); - } - - fn fast_forward_to(n: u64) { - while System::block_number() < n { - next_block(); - } - } - - #[test] - fn missing_preimage_should_fail() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); - } - - #[test] - fn preimage_deposit_should_be_required_and_returned() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - // fee of 100 is too much. - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); - assert_noop!( - Democracy::note_preimage(Origin::signed(6), vec![0; 500]), - BalancesError::::InsufficientBalance, - ); - // fee of 1 is reasonable. 
- PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - next_block(); - - assert_eq!(Balances::reserved_balance(6), 0); - assert_eq!(Balances::free_balance(6), 60); - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn preimage_deposit_should_be_reapable_earlier_by_owner() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!(Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2))); - - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - assert_noop!( - Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2)), - Error::::Early - ); - next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2))); - - assert_eq!(Balances::free_balance(6), 60); - assert_eq!(Balances::reserved_balance(6), 0); - }); - } - - #[test] - fn preimage_deposit_should_be_reapable() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2)), - Error::::PreimageMissing - ); - - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!(Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2))); - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - next_block(); - next_block(); - assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2)), - Error::::Early - ); - - next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2))); - assert_eq!(Balances::reserved_balance(6), 0); - assert_eq!(Balances::free_balance(6), 48); - assert_eq!(Balances::free_balance(5), 62); - }); - } - - #[test] - fn noting_imminent_preimage_for_free_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 1 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - assert_noop!( - Democracy::note_imminent_preimage(Origin::signed(7), set_balance_proposal(2)), - Error::::NotImminent - ); - - next_block(); - - // Now we're in the dispatch queue it's all good. - assert_ok!(Democracy::note_imminent_preimage(Origin::signed(7), set_balance_proposal(2))); - - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn reaping_imminent_preimage_should_fail() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let h = set_balance_proposal_hash_and_note(2); - let r = Democracy::inject_referendum(3, h, VoteThreshold::SuperMajorityApprove, 1); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - next_block(); - next_block(); - // now imminent. 
- assert_noop!(Democracy::reap_preimage(Origin::signed(6), h), Error::::Imminent); - }); - } - - #[test] - fn external_and_public_interleaving_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(1), - )); - assert_ok!(propose_set_balance_and_note(6, 2, 2)); - - fast_forward_to(2); - - // both waiting: external goes first. - assert_eq!( - Democracy::referendum_info(0), - Some(ReferendumInfo { - end: 4, - proposal_hash: set_balance_proposal_hash_and_note(1), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2 - }) - ); - // replenish external - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(3), - )); - - fast_forward_to(4); - - // both waiting: public goes next. - assert_eq!( - Democracy::referendum_info(1), - Some(ReferendumInfo { - end: 6, - proposal_hash: set_balance_proposal_hash_and_note(2), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2 - }) - ); - // don't replenish public - - fast_forward_to(6); - - // it's external "turn" again, though since public is empty that doesn't really matter - assert_eq!( - Democracy::referendum_info(2), - Some(ReferendumInfo { - end: 8, - proposal_hash: set_balance_proposal_hash_and_note(3), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2 - }) - ); - // replenish external - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(5), - )); - - fast_forward_to(8); - - // external goes again because there's no public waiting. - assert_eq!( - Democracy::referendum_info(3), - Some(ReferendumInfo { - end: 10, - proposal_hash: set_balance_proposal_hash_and_note(5), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2 - }) - ); - // replenish both - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(7), - )); - assert_ok!(propose_set_balance_and_note(6, 4, 2)); - - fast_forward_to(10); - - // public goes now since external went last time. - assert_eq!( - Democracy::referendum_info(4), - Some(ReferendumInfo { - end: 12, - proposal_hash: set_balance_proposal_hash_and_note(4), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2 - }) - ); - // replenish public again - assert_ok!(propose_set_balance_and_note(6, 6, 2)); - // cancel external - let h = set_balance_proposal_hash_and_note(7); - assert_ok!(Democracy::veto_external(Origin::signed(3), h)); - - fast_forward_to(12); - - // public goes again now since there's no external waiting. - assert_eq!( - Democracy::referendum_info(5), - Some(ReferendumInfo { - end: 14, - proposal_hash: set_balance_proposal_hash_and_note(6), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2 - }) - ); - }); - } - - - #[test] - fn emergency_cancel_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 2 - ); - assert!(Democracy::referendum_info(r).is_some()); - - assert_noop!(Democracy::emergency_cancel(Origin::signed(3), r), BadOrigin); - assert_ok!(Democracy::emergency_cancel(Origin::signed(4), r)); - assert!(Democracy::referendum_info(r).is_none()); - - // some time later... 
- - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 2 - ); - assert!(Democracy::referendum_info(r).is_some()); - assert_noop!(Democracy::emergency_cancel(Origin::signed(4), r), Error::::AlreadyCanceled); - }); - } - - #[test] - fn veto_external_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2), - )); - assert!(>::exists()); - - let h = set_balance_proposal_hash_and_note(2); - assert_ok!(Democracy::veto_external(Origin::signed(3), h.clone())); - // cancelled. - assert!(!>::exists()); - // fails - same proposal can't be resubmitted. - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::::ProposalBlacklisted); - - fast_forward_to(1); - // fails as we're still in cooloff period. - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::::ProposalBlacklisted); - - fast_forward_to(2); - // works; as we're out of the cooloff period. - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2), - )); - assert!(>::exists()); - - // 3 can't veto the same thing twice. - assert_noop!( - Democracy::veto_external(Origin::signed(3), h.clone()), - Error::::AlreadyVetoed - ); - - // 4 vetoes. - assert_ok!(Democracy::veto_external(Origin::signed(4), h.clone())); - // cancelled again. - assert!(!>::exists()); - - fast_forward_to(3); - // same proposal fails as we're still in cooloff - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::::ProposalBlacklisted); - // different proposal works fine. 
- assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(3), - )); - }); - } - - #[test] - fn external_referendum_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_noop!( - Democracy::external_propose( - Origin::signed(1), - set_balance_proposal_hash(2), - ), - BadOrigin, - ); - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2), - )); - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(1), - ), Error::::DuplicateProposal); - fast_forward_to(2); - assert_eq!( - Democracy::referendum_info(0), - Some(ReferendumInfo { - end: 4, - proposal_hash: set_balance_proposal_hash(2), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2 - }) - ); - }); - } - - #[test] - fn external_majority_referendum_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_noop!( - Democracy::external_propose_majority( - Origin::signed(1), - set_balance_proposal_hash(2) - ), - BadOrigin, - ); - assert_ok!(Democracy::external_propose_majority( - Origin::signed(3), - set_balance_proposal_hash_and_note(2) - )); - fast_forward_to(2); - assert_eq!( - Democracy::referendum_info(0), - Some(ReferendumInfo { - end: 4, - proposal_hash: set_balance_proposal_hash(2), - threshold: VoteThreshold::SimpleMajority, - delay: 2, - }) - ); - }); - } - - #[test] - fn external_default_referendum_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_noop!( - Democracy::external_propose_default( - Origin::signed(3), - set_balance_proposal_hash(2) - ), - BadOrigin, - ); - assert_ok!(Democracy::external_propose_default( - Origin::signed(1), - set_balance_proposal_hash_and_note(2) - )); - fast_forward_to(2); - assert_eq!( - Democracy::referendum_info(0), - Some(ReferendumInfo { - end: 4, - proposal_hash: set_balance_proposal_hash(2), - threshold: VoteThreshold::SuperMajorityAgainst, - delay: 2, - }) - ); - }); - } - - #[test] - fn fast_track_referendum_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); - assert_noop!(Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::ProposalMissing); - assert_ok!(Democracy::external_propose_majority( - Origin::signed(3), - set_balance_proposal_hash_and_note(2) - )); - assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); - assert_ok!(Democracy::fast_track(Origin::signed(5), h, 0, 0)); - assert_eq!( - Democracy::referendum_info(0), - Some(ReferendumInfo { - end: 1, - proposal_hash: set_balance_proposal_hash_and_note(2), - threshold: VoteThreshold::SimpleMajority, - delay: 0, - }) - ); - }); - } - - #[test] - fn fast_track_referendum_fails_when_no_simple_majority() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2) - )); - assert_noop!( - Democracy::fast_track(Origin::signed(5), h, 3, 2), - Error::::NotSimpleMajority - ); - }); - } - - #[test] - fn locked_for_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_ok!(propose_set_balance_and_note(1, 3, 3)); - assert_eq!(Democracy::locked_for(0), Some(2)); - assert_eq!(Democracy::locked_for(1), Some(4)); - 
assert_eq!(Democracy::locked_for(2), Some(3)); - }); - } - - #[test] - fn single_proposal_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - assert!(Democracy::referendum_info(0).is_none()); - - // start of 2 => next referendum scheduled. - fast_forward_to(2); - - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!( - Democracy::referendum_info(0), - Some(ReferendumInfo { - end: 4, - proposal_hash: set_balance_proposal_hash_and_note(2), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2 - }) - ); - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - assert_eq!(Democracy::tally(r), (1, 0, 1)); - - fast_forward_to(3); - - // referendum still running - assert!(Democracy::referendum_info(0).is_some()); - - // referendum runs during 2 and 3, ends @ start of 4. - fast_forward_to(4); - - assert!(Democracy::referendum_info(0).is_none()); - assert_eq!(Democracy::dispatch_queue(), vec![ - (6, set_balance_proposal_hash_and_note(2), 0) - ]); - - // referendum passes and wait another two blocks for enactment. - fast_forward_to(6); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn cancel_queued_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - // start of 2 => next referendum scheduled. - fast_forward_to(2); - - assert_ok!(Democracy::vote(Origin::signed(1), 0, AYE)); - - fast_forward_to(4); - - assert_eq!(Democracy::dispatch_queue(), vec![ - (6, set_balance_proposal_hash_and_note(2), 0) - ]); - - assert_noop!(Democracy::cancel_queued(Origin::ROOT, 1), Error::::ProposalMissing); - assert_ok!(Democracy::cancel_queued(Origin::ROOT, 0)); - assert_eq!(Democracy::dispatch_queue(), vec![]); - }); - } - - #[test] - fn proxy_should_work() { - new_test_ext().execute_with(|| { - assert_eq!(Democracy::proxy(10), None); - assert!(System::allow_death(&10)); - - assert_noop!(Democracy::activate_proxy(Origin::signed(1), 10), Error::::NotOpen); - - assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); - assert!(!System::allow_death(&10)); - assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); - - assert_noop!(Democracy::activate_proxy(Origin::signed(2), 10), Error::::WrongOpen); - assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); - assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); - - // Can't set when already set. - assert_noop!(Democracy::activate_proxy(Origin::signed(2), 10), Error::::AlreadyProxy); - - // But this works because 11 isn't proxying. - assert_ok!(Democracy::open_proxy(Origin::signed(11), 2)); - assert_ok!(Democracy::activate_proxy(Origin::signed(2), 11)); - assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); - assert_eq!(Democracy::proxy(11), Some(ProxyState::Active(2))); - - // 2 cannot fire 1's proxy: - assert_noop!(Democracy::deactivate_proxy(Origin::signed(2), 10), Error::::WrongProxy); - - // 1 deactivates their proxy: - assert_ok!(Democracy::deactivate_proxy(Origin::signed(1), 10)); - assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); - // but the proxy account cannot be killed until the proxy is closed. 
- assert!(!System::allow_death(&10)); - - // and then 10 closes it completely: - assert_ok!(Democracy::close_proxy(Origin::signed(10))); - assert_eq!(Democracy::proxy(10), None); - assert!(System::allow_death(&10)); - - // 11 just closes without 2's "permission". - assert_ok!(Democracy::close_proxy(Origin::signed(11))); - assert_eq!(Democracy::proxy(11), None); - assert!(System::allow_death(&11)); - }); - } - - #[test] - fn single_proposal_should_work_with_proxy() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - fast_forward_to(2); - let r = 0; - assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); - assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); - assert_ok!(Democracy::proxy_vote(Origin::signed(10), r, AYE)); - - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - assert_eq!(Democracy::tally(r), (1, 0, 1)); - - fast_forward_to(6); - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn single_proposal_should_work_with_delegation() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - fast_forward_to(2); - - // Delegate vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::max_value())); - - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - // Delegated vote is counted. - assert_eq!(Democracy::tally(r), (3, 0, 3)); - - fast_forward_to(6); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn single_proposal_should_work_with_cyclic_delegation() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - fast_forward_to(2); - - // Check behavior with cycle. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::max_value())); - assert_ok!(Democracy::delegate(Origin::signed(3), 2, Conviction::max_value())); - assert_ok!(Democracy::delegate(Origin::signed(1), 3, Conviction::max_value())); - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - assert_eq!(Democracy::voters_for(r), vec![1]); - - // Delegated vote is counted. - assert_eq!(Democracy::tally(r), (6, 0, 6)); - - fast_forward_to(6); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - /// If transactor already voted, delegated vote is overwritten. - fn single_proposal_should_work_with_vote_and_delegation() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - fast_forward_to(2); - - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - // Vote. - assert_ok!(Democracy::vote(Origin::signed(2), r, AYE)); - // Delegate vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::max_value())); - assert_eq!(Democracy::voters_for(r), vec![1, 2]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - // Delegated vote is not counted. - assert_eq!(Democracy::tally(r), (3, 0, 3)); - - fast_forward_to(6); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn single_proposal_should_work_with_undelegation() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - // Delegate and undelegate vote. 
- assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::max_value())); - assert_ok!(Democracy::undelegate(Origin::signed(2))); - - fast_forward_to(2); - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - - // Delegated vote is not counted. - assert_eq!(Democracy::tally(r), (1, 0, 1)); - - fast_forward_to(6); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - /// If transactor voted, delegated vote is overwritten. - fn single_proposal_should_work_with_delegation_and_vote() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - fast_forward_to(2); - let r = 0; - - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - // Delegate vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::max_value())); - - // Vote. - assert_ok!(Democracy::vote(Origin::signed(2), r, AYE)); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!(Democracy::voters_for(r), vec![1, 2]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - - // Delegated vote is not counted. - assert_eq!(Democracy::tally(r), (3, 0, 3)); - - fast_forward_to(6); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn deposit_for_proposals_should_be_taken() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::free_balance(2), 15); - assert_eq!(Balances::free_balance(5), 35); - }); - } - - #[test] - fn deposit_for_proposals_should_be_returned() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - fast_forward_to(3); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 20); - assert_eq!(Balances::free_balance(5), 50); - }); - } - - #[test] - fn proposal_with_deposit_below_minimum_should_not_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - assert_noop!(propose_set_balance(1, 2, 0), Error::::ValueLow); - }); - } - - #[test] - fn poor_proposer_should_not_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - assert_noop!(propose_set_balance(1, 2, 11), BalancesError::::InsufficientBalance); - }); - } - - #[test] - fn poor_seconder_should_not_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - assert_ok!(propose_set_balance_and_note(2, 2, 11)); - assert_noop!(Democracy::second(Origin::signed(1), 0), BalancesError::::InsufficientBalance); - }); - } - - #[test] - fn runners_up_should_come_after() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_ok!(propose_set_balance_and_note(1, 3, 3)); - fast_forward_to(2); - assert_ok!(Democracy::vote(Origin::signed(1), 0, AYE)); - 
fast_forward_to(4); - assert_ok!(Democracy::vote(Origin::signed(1), 1, AYE)); - fast_forward_to(6); - assert_ok!(Democracy::vote(Origin::signed(1), 2, AYE)); - }); - } - - #[test] - fn ooo_inject_referendums_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let r1 = Democracy::inject_referendum( - 3, - set_balance_proposal_hash_and_note(3), - VoteThreshold::SuperMajorityApprove, - 0 - ); - let r2 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - - assert_ok!(Democracy::vote(Origin::signed(1), r2, AYE)); - assert_eq!(Democracy::voters_for(r2), vec![1]); - assert_eq!(Democracy::vote_of((r2, 1)), AYE); - assert_eq!(Democracy::tally(r2), (1, 0, 1)); - - next_block(); - assert_eq!(Balances::free_balance(42), 2); - - assert_ok!(Democracy::vote(Origin::signed(1), r1, AYE)); - assert_eq!(Democracy::voters_for(r1), vec![1]); - assert_eq!(Democracy::vote_of((r1, 1)), AYE); - assert_eq!(Democracy::tally(r1), (1, 0, 1)); - - next_block(); - assert_eq!(Balances::free_balance(42), 3); - }); - } - - #[test] - fn simple_passing_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), AYE); - assert_eq!(Democracy::tally(r), (1, 0, 1)); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn cancel_referendum_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - assert_ok!(Democracy::cancel_referendum(Origin::ROOT, r.into())); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); - } - - #[test] - fn simple_failing_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, NAY)); - - assert_eq!(Democracy::voters_for(r), vec![1]); - assert_eq!(Democracy::vote_of((r, 1)), NAY); - assert_eq!(Democracy::tally(r), (0, 1, 1)); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); - } - - #[test] - fn controversial_voting_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - - assert_ok!(Democracy::vote(Origin::signed(1), r, BIG_AYE)); - assert_ok!(Democracy::vote(Origin::signed(2), r, BIG_NAY)); - assert_ok!(Democracy::vote(Origin::signed(3), r, BIG_NAY)); - assert_ok!(Democracy::vote(Origin::signed(4), r, BIG_AYE)); - assert_ok!(Democracy::vote(Origin::signed(5), r, BIG_NAY)); - assert_ok!(Democracy::vote(Origin::signed(6), r, BIG_AYE)); - - assert_eq!(Democracy::tally(r), (110, 100, 210)); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn delayed_enactment_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let 
r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 1 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(2), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(3), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(4), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(5), r, AYE)); - assert_ok!(Democracy::vote(Origin::signed(6), r, AYE)); - - assert_eq!(Democracy::tally(r), (21, 0, 21)); - - next_block(); - assert_eq!(Balances::free_balance(42), 0); - - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn controversial_low_turnout_voting_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(5), r, BIG_NAY)); - assert_ok!(Democracy::vote(Origin::signed(6), r, BIG_AYE)); - - assert_eq!(Democracy::tally(r), (60, 50, 110)); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); - } - - #[test] - fn passing_low_turnout_voting_should_work() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_eq!(Balances::total_issuance(), 210); - - System::set_block_number(1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(4), r, BIG_AYE)); - assert_ok!(Democracy::vote(Origin::signed(5), r, BIG_NAY)); - assert_ok!(Democracy::vote(Origin::signed(6), r, BIG_AYE)); - - assert_eq!(Democracy::tally(r), (100, 50, 150)); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); - } - - #[test] - fn lock_voting_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, Vote { - aye: false, - conviction: Conviction::Locked5x - })); - assert_ok!(Democracy::vote(Origin::signed(2), r, Vote { - aye: true, - conviction: Conviction::Locked4x - })); - assert_ok!(Democracy::vote(Origin::signed(3), r, Vote { - aye: true, - conviction: Conviction::Locked3x - })); - assert_ok!(Democracy::vote(Origin::signed(4), r, Vote { - aye: true, - conviction: Conviction::Locked2x - })); - assert_ok!(Democracy::vote(Origin::signed(5), r, Vote { - aye: false, - conviction: Conviction::Locked1x - })); - - assert_eq!(Democracy::tally(r), (250, 100, 150)); - - fast_forward_to(2); - - assert_eq!(Balances::locks(1), vec![]); - assert_eq!(Balances::locks(2), vec![BalanceLock { - id: DEMOCRACY_ID, - amount: u64::max_value(), - reasons: pallet_balances::Reasons::Misc, - }]); - assert_eq!(Democracy::locks(2), Some(18)); - assert_eq!(Balances::locks(3), vec![BalanceLock { - id: DEMOCRACY_ID, - amount: u64::max_value(), - reasons: pallet_balances::Reasons::Misc, - }]); - assert_eq!(Democracy::locks(3), Some(10)); - assert_eq!(Balances::locks(4), vec![BalanceLock { - id: DEMOCRACY_ID, - amount: u64::max_value(), - reasons: pallet_balances::Reasons::Misc, - }]); - assert_eq!(Democracy::locks(4), Some(6)); - assert_eq!(Balances::locks(5), vec![]); - - assert_eq!(Balances::free_balance(42), 2); - - assert_noop!(Democracy::unlock(Origin::signed(1), 1), 
Error::::NotLocked); - - fast_forward_to(5); - assert_noop!(Democracy::unlock(Origin::signed(1), 4), Error::::NotExpired); - fast_forward_to(6); - assert_ok!(Democracy::unlock(Origin::signed(1), 4)); - assert_noop!(Democracy::unlock(Origin::signed(1), 4), Error::::NotLocked); - - fast_forward_to(9); - assert_noop!(Democracy::unlock(Origin::signed(1), 3), Error::::NotExpired); - fast_forward_to(10); - assert_ok!(Democracy::unlock(Origin::signed(1), 3)); - assert_noop!(Democracy::unlock(Origin::signed(1), 3), Error::::NotLocked); - - fast_forward_to(17); - assert_noop!(Democracy::unlock(Origin::signed(1), 2), Error::::NotExpired); - fast_forward_to(18); - assert_ok!(Democracy::unlock(Origin::signed(1), 2)); - assert_noop!(Democracy::unlock(Origin::signed(1), 2), Error::::NotLocked); - }); - } - - #[test] - fn no_locks_without_conviction_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0, - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, Vote { - aye: true, - conviction: Conviction::None, - })); - - fast_forward_to(2); - - assert_eq!(Balances::free_balance(42), 2); - assert_eq!(Balances::locks(1), vec![]); - }); - } - - #[test] - fn lock_voting_should_work_with_delegation() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, Vote { - aye: false, - conviction: Conviction::Locked5x - })); - assert_ok!(Democracy::vote(Origin::signed(2), r, Vote { - aye: true, - conviction: Conviction::Locked4x - })); - assert_ok!(Democracy::vote(Origin::signed(3), r, Vote { - aye: true, - conviction: Conviction::Locked3x - })); - assert_ok!(Democracy::delegate(Origin::signed(4), 2, Conviction::Locked2x)); - assert_ok!(Democracy::vote(Origin::signed(5), r, Vote { - aye: false, - conviction: Conviction::Locked1x - })); - - assert_eq!(Democracy::tally(r), (250, 100, 150)); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); - } -} diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..e7320da08280169d5847394c02a7ab8e35637182 --- /dev/null +++ b/frame/democracy/src/tests.rs @@ -0,0 +1,264 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The crate's tests. 
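One note on the helpers defined further down in this file: `next_block` now also drives `Scheduler::on_initialize`, because approved referenda with a delay are enacted through the scheduler rather than the old dispatch queue. A toy sketch of that interaction follows (hypothetical types, not the real pallet-scheduler API): nothing enqueued executes unless each simulated block gives the scheduler a chance to run its agenda.

use std::collections::BTreeMap;

struct Scheduler { agenda: BTreeMap<u64, Vec<Box<dyn FnOnce()>>> }

impl Scheduler {
    /// Queue a task to run at block `when`.
    fn schedule(&mut self, when: u64, task: Box<dyn FnOnce()>) {
        self.agenda.entry(when).or_default().push(task);
    }
    /// Run everything that falls due at block `now`.
    fn on_initialize(&mut self, now: u64) {
        for task in self.agenda.remove(&now).unwrap_or_default() {
            task();
        }
    }
}

fn main() {
    let mut sched = Scheduler { agenda: BTreeMap::new() };
    sched.schedule(6, Box::new(|| println!("enact proposal #0")));
    for now in 1..=6 {
        sched.on_initialize(now); // prints only once block 6 is reached
    }
}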
+
+use super::*;
+use std::cell::RefCell;
+use codec::Encode;
+use frame_support::{
+	impl_outer_origin, impl_outer_dispatch, assert_noop, assert_ok, parameter_types,
+	ord_parameter_types, traits::{Contains, OnInitialize}, weights::Weight,
+};
+use sp_core::H256;
+use sp_runtime::{
+	traits::{BlakeTwo256, IdentityLookup, BadOrigin},
+	testing::Header, Perbill,
+};
+use pallet_balances::{BalanceLock, Error as BalancesError};
+use frame_system::EnsureSignedBy;
+
+mod cancellation;
+mod delegation;
+mod external_proposing;
+mod fast_tracking;
+mod lock_voting;
+mod preimage;
+mod proxying;
+mod public_proposals;
+mod scheduling;
+mod voting;
+
+const AYE: Vote = Vote { aye: true, conviction: Conviction::None };
+const NAY: Vote = Vote { aye: false, conviction: Conviction::None };
+const BIG_AYE: Vote = Vote { aye: true, conviction: Conviction::Locked1x };
+const BIG_NAY: Vote = Vote { aye: false, conviction: Conviction::Locked1x };
+
+impl_outer_origin! {
+	pub enum Origin for Test where system = frame_system {}
+}
+
+impl_outer_dispatch! {
+	pub enum Call for Test where origin: Origin {
+		pallet_balances::Balances,
+		democracy::Democracy,
+	}
+}
+
+// Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted.
+#[derive(Clone, Eq, PartialEq, Debug)]
+pub struct Test;
+parameter_types! {
+	pub const BlockHashCount: u64 = 250;
+	pub const MaximumBlockWeight: Weight = 1024;
+	pub const MaximumBlockLength: u32 = 2 * 1024;
+	pub const AvailableBlockRatio: Perbill = Perbill::one();
+}
+impl frame_system::Trait for Test {
+	type Origin = Origin;
+	type Index = u64;
+	type BlockNumber = u64;
+	type Call = Call;
+	type Hash = H256;
+	type Hashing = BlakeTwo256;
+	type AccountId = u64;
+	type Lookup = IdentityLookup<Self::AccountId>;
+	type Header = Header;
+	type Event = ();
+	type BlockHashCount = BlockHashCount;
+	type MaximumBlockWeight = MaximumBlockWeight;
+	type MaximumBlockLength = MaximumBlockLength;
+	type AvailableBlockRatio = AvailableBlockRatio;
+	type Version = ();
+	type ModuleToIndex = ();
+	type AccountData = pallet_balances::AccountData<u64>;
+	type OnNewAccount = ();
+	type OnKilledAccount = ();
+}
+parameter_types! {
+	pub const ExistentialDeposit: u64 = 1;
+	pub const MaximumWeight: u32 = 1000000;
+}
+impl pallet_scheduler::Trait for Test {
+	type Event = ();
+	type Origin = Origin;
+	type Call = Call;
+	type MaximumWeight = MaximumWeight;
+}
+impl pallet_balances::Trait for Test {
+	type Balance = u64;
+	type Event = ();
+	type DustRemoval = ();
+	type ExistentialDeposit = ExistentialDeposit;
+	type AccountStore = System;
+}
+parameter_types! {
+	pub const LaunchPeriod: u64 = 2;
+	pub const VotingPeriod: u64 = 2;
+	pub const FastTrackVotingPeriod: u64 = 2;
+	pub const MinimumDeposit: u64 = 1;
+	pub const EnactmentPeriod: u64 = 2;
+	pub const CooloffPeriod: u64 = 2;
+}
+ord_parameter_types! {
+	pub const One: u64 = 1;
+	pub const Two: u64 = 2;
+	pub const Three: u64 = 3;
+	pub const Four: u64 = 4;
+	pub const Five: u64 = 5;
+	pub const Six: u64 = 6;
+}
+pub struct OneToFive;
+impl Contains<u64> for OneToFive {
+	fn sorted_members() -> Vec<u64> {
+		vec![1, 2, 3, 4, 5]
+	}
+	#[cfg(feature = "runtime-benchmarks")]
+	fn add(_m: &u64) {}
+}
+thread_local! {
+	static PREIMAGE_BYTE_DEPOSIT: RefCell<u64> = RefCell::new(0);
+	static INSTANT_ALLOWED: RefCell<bool> = RefCell::new(false);
+}
+pub struct PreimageByteDeposit;
+impl Get<u64> for PreimageByteDeposit {
+	fn get() -> u64 { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow()) }
+}
+pub struct InstantAllowed;
+impl Get<bool> for InstantAllowed {
+	fn get() -> bool { INSTANT_ALLOWED.with(|v| *v.borrow()) }
+}
+impl super::Trait for Test {
+	type Proposal = Call;
+	type Event = ();
+	type Currency = pallet_balances::Module<Self>;
+	type EnactmentPeriod = EnactmentPeriod;
+	type LaunchPeriod = LaunchPeriod;
+	type VotingPeriod = VotingPeriod;
+	type FastTrackVotingPeriod = FastTrackVotingPeriod;
+	type MinimumDeposit = MinimumDeposit;
+	type ExternalOrigin = EnsureSignedBy<Two, u64>;
+	type ExternalMajorityOrigin = EnsureSignedBy<Three, u64>;
+	type ExternalDefaultOrigin = EnsureSignedBy<One, u64>;
+	type FastTrackOrigin = EnsureSignedBy<Five, u64>;
+	type CancellationOrigin = EnsureSignedBy<Four, u64>;
+	type VetoOrigin = EnsureSignedBy<OneToFive, u64>;
+	type CooloffPeriod = CooloffPeriod;
+	type PreimageByteDeposit = PreimageByteDeposit;
+	type Slash = ();
+	type InstantOrigin = EnsureSignedBy<Six, u64>;
+	type InstantAllowed = InstantAllowed;
+	type Scheduler = Scheduler;
+}
+
+pub fn new_test_ext() -> sp_io::TestExternalities {
+	let mut t = frame_system::GenesisConfig::default().build_storage::<Test>().unwrap();
+	pallet_balances::GenesisConfig::<Test>{
+		balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)],
+	}.assimilate_storage(&mut t).unwrap();
+	GenesisConfig::default().assimilate_storage(&mut t).unwrap();
+	let mut ext = sp_io::TestExternalities::new(t);
+	ext.execute_with(|| System::set_block_number(1));
+	ext
+}
+
+type System = frame_system::Module<Test>;
+type Balances = pallet_balances::Module<Test>;
+type Scheduler = pallet_scheduler::Module<Test>;
+type Democracy = Module<Test>;
+
+#[test]
+fn params_should_work() {
+	new_test_ext().execute_with(|| {
+		assert_eq!(Democracy::referendum_count(), 0);
+		assert_eq!(Balances::free_balance(42), 0);
+		assert_eq!(Balances::total_issuance(), 210);
+	});
+}
+
+fn set_balance_proposal(value: u64) -> Vec<u8> {
+	Call::Balances(pallet_balances::Call::set_balance(42, value, 0)).encode()
+}
+
+fn set_balance_proposal_hash(value: u64) -> H256 {
+	BlakeTwo256::hash(&set_balance_proposal(value)[..])
+}
+
+fn set_balance_proposal_hash_and_note(value: u64) -> H256 {
+	let p = set_balance_proposal(value);
+	let h = BlakeTwo256::hash(&p[..]);
+	match Democracy::note_preimage(Origin::signed(6), p) {
+		Ok(_) => (),
+		Err(x) if x == Error::<Test>::DuplicatePreimage.into() => (),
+		Err(x) => panic!(x),
+	}
+	h
+}
+
+fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult {
+	Democracy::propose(
+		Origin::signed(who),
+		set_balance_proposal_hash(value),
+		delay
+	)
+}
+
+fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchResult {
+	Democracy::propose(
+		Origin::signed(who),
+		set_balance_proposal_hash_and_note(value),
+		delay
+	)
+}
+
+fn next_block() {
+	System::set_block_number(System::block_number() + 1);
+	Scheduler::on_initialize(System::block_number());
+	assert_eq!(Democracy::begin_block(System::block_number()), Ok(()));
+}
+
+fn fast_forward_to(n: u64) {
+	while System::block_number() < n {
+		next_block();
+	}
+}
+
+fn begin_referendum() -> ReferendumIndex {
+	System::set_block_number(0);
+	assert_ok!(propose_set_balance_and_note(1, 2, 1));
+	fast_forward_to(2);
+	0
+}
+
+fn aye(who: u64) -> AccountVote<u64> {
+	AccountVote::Standard { vote: AYE, balance: Balances::free_balance(&who) }
+}
+
+fn nay(who: u64) -> AccountVote<u64> {
+	
AccountVote::Standard { vote: NAY, balance: Balances::free_balance(&who) } +} + +fn big_aye(who: u64) -> AccountVote { + AccountVote::Standard { vote: BIG_AYE, balance: Balances::free_balance(&who) } +} + +fn big_nay(who: u64) -> AccountVote { + AccountVote::Standard { vote: BIG_NAY, balance: Balances::free_balance(&who) } +} + +fn tally(r: ReferendumIndex) -> Tally { + Democracy::referendum_status(r).unwrap().tally +} diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs new file mode 100644 index 0000000000000000000000000000000000000000..998b0c14d8c8074738f847b8d7b9497e3340b4b5 --- /dev/null +++ b/frame/democracy/src/tests/cancellation.rs @@ -0,0 +1,91 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The tests for cancelation functionality. + +use super::*; + +#[test] +fn cancel_referendum_should_work() { + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::cancel_referendum(Origin::ROOT, r.into())); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 0); + }); +} + +#[test] +fn cancel_queued_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + // start of 2 => next referendum scheduled. + fast_forward_to(2); + + assert_ok!(Democracy::vote(Origin::signed(1), 0, aye(1))); + + fast_forward_to(4); + + assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); + + assert_noop!(Democracy::cancel_queued(Origin::ROOT, 1), Error::::ProposalMissing); + assert_ok!(Democracy::cancel_queued(Origin::ROOT, 0)); + assert!(pallet_scheduler::Agenda::::get(6)[0].is_none()); + }); +} + +#[test] +fn emergency_cancel_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 2 + ); + assert!(Democracy::referendum_status(r).is_ok()); + + assert_noop!(Democracy::emergency_cancel(Origin::signed(3), r), BadOrigin); + assert_ok!(Democracy::emergency_cancel(Origin::signed(4), r)); + assert!(Democracy::referendum_info(r).is_none()); + + // some time later... 
+ + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 2 + ); + assert!(Democracy::referendum_status(r).is_ok()); + assert_noop!( + Democracy::emergency_cancel(Origin::signed(4), r), + Error::::AlreadyCanceled, + ); + }); +} diff --git a/frame/democracy/src/tests/delegation.rs b/frame/democracy/src/tests/delegation.rs new file mode 100644 index 0000000000000000000000000000000000000000..061a48b587798219f5562e8ca13fadd214c4cfe8 --- /dev/null +++ b/frame/democracy/src/tests/delegation.rs @@ -0,0 +1,178 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The tests for functionality concerning delegation. + +use super::*; + +#[test] +fn single_proposal_should_work_with_delegation() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + fast_forward_to(2); + + // Delegate first vote. + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + let r = 0; + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); + + // Delegate a second vote. + assert_ok!(Democracy::delegate(Origin::signed(3), 1, Conviction::None, 30)); + assert_eq!(tally(r), Tally { ayes: 6, nays: 0, turnout: 60 }); + + // Reduce first vote. + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 10)); + assert_eq!(tally(r), Tally { ayes: 5, nays: 0, turnout: 50 }); + + // Second vote delegates to first; we don't do tiered delegation, so it doesn't get used. + assert_ok!(Democracy::delegate(Origin::signed(3), 2, Conviction::None, 30)); + assert_eq!(tally(r), Tally { ayes: 2, nays: 0, turnout: 20 }); + + // Main voter cancels their vote + assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); + assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); + + // First delegator delegates half funds with conviction; nothing changes yet. + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked1x, 10)); + assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); + + // Main voter reinstates their vote + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_eq!(tally(r), Tally { ayes: 11, nays: 0, turnout: 20 }); + }); +} + +#[test] +fn self_delegation_not_allowed() { + new_test_ext().execute_with(|| { + assert_noop!( + Democracy::delegate(Origin::signed(1), 1, Conviction::None, 10), + Error::::Nonsense, + ); + }); +} + +#[test] +fn cyclic_delegation_should_unwind() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + fast_forward_to(2); + + // Check behavior with cycle. 
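+		// 2 delegates to 1, 3 delegates to 2 and 1 delegates to 3, forming a cycle.
+		// Once 3 and 1 undelegate and vote directly, only 2's delegation remains:
+		// 3's aye of 30 counts as 3 votes (Conviction::None is 0.1x), while 1's nay of
+		// 10 plus 2's delegated 20 count as 1 + 2 votes, giving the 3 aye / 3 nay tally.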
+ assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::delegate(Origin::signed(3), 2, Conviction::None, 30)); + assert_ok!(Democracy::delegate(Origin::signed(1), 3, Conviction::None, 10)); + let r = 0; + assert_ok!(Democracy::undelegate(Origin::signed(3))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); + assert_ok!(Democracy::undelegate(Origin::signed(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); + + // Delegated vote is counted. + assert_eq!(tally(r), Tally { ayes: 3, nays: 3, turnout: 60 }); + }); +} + +#[test] +fn single_proposal_should_work_with_vote_and_delegation() { + // If transactor already voted, delegated vote is overwritten. + new_test_ext().execute_with(|| { + System::set_block_number(0); + + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + fast_forward_to(2); + + let r = 0; + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(2), r, nay(2))); + assert_eq!(tally(r), Tally { ayes: 1, nays: 2, turnout: 30 }); + + // Delegate vote. + assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + // Delegated vote replaces the explicit vote. + assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); + }); +} + +#[test] +fn single_proposal_should_work_with_undelegation() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + // Delegate and undelegate vote. + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + assert_ok!(Democracy::undelegate(Origin::signed(2))); + + fast_forward_to(2); + let r = 0; + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + // Delegated vote is not counted. + assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); + }); +} + +#[test] +fn single_proposal_should_work_with_delegation_and_vote() { + // If transactor voted, delegated vote is overwritten. + new_test_ext().execute_with(|| { + let r = begin_referendum(); + // Delegate, undelegate and vote. + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); + assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); + assert_ok!(Democracy::undelegate(Origin::signed(2))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); + // Delegated vote is not counted. + assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); + }); +} + +#[test] +fn conviction_should_be_honored_in_delegation() { + // If transactor voted, delegated vote is overwritten. + new_test_ext().execute_with(|| { + let r = begin_referendum(); + // Delegate, undelegate and vote. + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + // Delegated vote is huge. + assert_eq!(tally(r), Tally { ayes: 121, nays: 0, turnout: 30 }); + }); +} + +#[test] +fn split_vote_delegation_should_be_ignored() { + // If transactor voted, delegated vote is overwritten. + new_test_ext().execute_with(|| { + let r = begin_referendum(); + assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); + assert_ok!(Democracy::vote(Origin::signed(1), r, AccountVote::Split { aye: 10, nay: 0 })); + // Delegated vote is huge. 
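+		// ...but a Split vote carries no conviction and receives no delegations, so only
+		// 1's own aye of 10 (counted at 0.1x) shows up in the tally below.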
+ assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); + }); +} diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs new file mode 100644 index 0000000000000000000000000000000000000000..a249a806ee9edf4af87b6ecc81920783ac29c431 --- /dev/null +++ b/frame/democracy/src/tests/external_proposing.rs @@ -0,0 +1,289 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The tests for functionality concerning the "external" origin. + +use super::*; + +#[test] +fn veto_external_works() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); + assert!(>::exists()); + + let h = set_balance_proposal_hash_and_note(2); + assert_ok!(Democracy::veto_external(Origin::signed(3), h.clone())); + // cancelled. + assert!(!>::exists()); + // fails - same proposal can't be resubmitted. + assert_noop!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash(2), + ), Error::::ProposalBlacklisted); + + fast_forward_to(1); + // fails as we're still in cooloff period. + assert_noop!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash(2), + ), Error::::ProposalBlacklisted); + + fast_forward_to(2); + // works; as we're out of the cooloff period. + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); + assert!(>::exists()); + + // 3 can't veto the same thing twice. + assert_noop!( + Democracy::veto_external(Origin::signed(3), h.clone()), + Error::::AlreadyVetoed + ); + + // 4 vetoes. + assert_ok!(Democracy::veto_external(Origin::signed(4), h.clone())); + // cancelled again. + assert!(!>::exists()); + + fast_forward_to(3); + // same proposal fails as we're still in cooloff + assert_noop!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash(2), + ), Error::::ProposalBlacklisted); + // different proposal works fine. 
+ assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(3), + )); + }); +} + +#[test] +fn external_referendum_works() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_noop!( + Democracy::external_propose( + Origin::signed(1), + set_balance_proposal_hash(2), + ), + BadOrigin, + ); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); + assert_noop!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash(1), + ), Error::::DuplicateProposal); + fast_forward_to(2); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash(2), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + }); +} + +#[test] +fn external_majority_referendum_works() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_noop!( + Democracy::external_propose_majority( + Origin::signed(1), + set_balance_proposal_hash(2) + ), + BadOrigin, + ); + assert_ok!(Democracy::external_propose_majority( + Origin::signed(3), + set_balance_proposal_hash_and_note(2) + )); + fast_forward_to(2); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash(2), + threshold: VoteThreshold::SimpleMajority, + delay: 2, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + }); +} + +#[test] +fn external_default_referendum_works() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_noop!( + Democracy::external_propose_default( + Origin::signed(3), + set_balance_proposal_hash(2) + ), + BadOrigin, + ); + assert_ok!(Democracy::external_propose_default( + Origin::signed(1), + set_balance_proposal_hash_and_note(2) + )); + fast_forward_to(2); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash(2), + threshold: VoteThreshold::SuperMajorityAgainst, + delay: 2, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + }); +} + + +#[test] +fn external_and_public_interleaving_works() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(1), + )); + assert_ok!(propose_set_balance_and_note(6, 2, 2)); + + fast_forward_to(2); + + // both waiting: external goes first. + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash_and_note(1), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + // replenish external + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(3), + )); + + fast_forward_to(4); + + // both waiting: public goes next. 
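+		// Tabling alternates between the external and public queues every LaunchPeriod
+		// (2 blocks here); when one queue is empty the other is used instead.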
+ assert_eq!( + Democracy::referendum_status(1), + Ok(ReferendumStatus { + end: 6, + proposal_hash: set_balance_proposal_hash_and_note(2), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + // don't replenish public + + fast_forward_to(6); + + // it's external "turn" again, though since public is empty that doesn't really matter + assert_eq!( + Democracy::referendum_status(2), + Ok(ReferendumStatus { + end: 8, + proposal_hash: set_balance_proposal_hash_and_note(3), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + // replenish external + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(5), + )); + + fast_forward_to(8); + + // external goes again because there's no public waiting. + assert_eq!( + Democracy::referendum_status(3), + Ok(ReferendumStatus { + end: 10, + proposal_hash: set_balance_proposal_hash_and_note(5), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + // replenish both + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(7), + )); + assert_ok!(propose_set_balance_and_note(6, 4, 2)); + + fast_forward_to(10); + + // public goes now since external went last time. + assert_eq!( + Democracy::referendum_status(4), + Ok(ReferendumStatus { + end: 12, + proposal_hash: set_balance_proposal_hash_and_note(4), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + // replenish public again + assert_ok!(propose_set_balance_and_note(6, 6, 2)); + // cancel external + let h = set_balance_proposal_hash_and_note(7); + assert_ok!(Democracy::veto_external(Origin::signed(3), h)); + + fast_forward_to(12); + + // public goes again now since there's no external waiting. + assert_eq!( + Democracy::referendum_status(5), + Ok(ReferendumStatus { + end: 14, + proposal_hash: set_balance_proposal_hash_and_note(6), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + }); +} diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs new file mode 100644 index 0000000000000000000000000000000000000000..5ce9b15baf34cde31d2788679f3bdb106e1e8a84 --- /dev/null +++ b/frame/democracy/src/tests/fast_tracking.rs @@ -0,0 +1,88 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The tests for fast-tracking functionality. 
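+//!
+//! `fast_track` turns the pending external-majority proposal into a referendum with a
+//! caller-chosen voting period; only the instant origin may go below
+//! `FastTrackVotingPeriod`, and only while `INSTANT_ALLOWED` is set.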
+ +use super::*; + +#[test] +fn fast_track_referendum_works() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + let h = set_balance_proposal_hash_and_note(2); + assert_noop!(Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::ProposalMissing); + assert_ok!(Democracy::external_propose_majority( + Origin::signed(3), + set_balance_proposal_hash_and_note(2) + )); + assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); + assert_ok!(Democracy::fast_track(Origin::signed(5), h, 2, 0)); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 2, + proposal_hash: set_balance_proposal_hash_and_note(2), + threshold: VoteThreshold::SimpleMajority, + delay: 0, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + }); +} + +#[test] +fn instant_referendum_works() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + let h = set_balance_proposal_hash_and_note(2); + assert_noop!(Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::ProposalMissing); + assert_ok!(Democracy::external_propose_majority( + Origin::signed(3), + set_balance_proposal_hash_and_note(2) + )); + assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); + assert_noop!(Democracy::fast_track(Origin::signed(5), h, 1, 0), BadOrigin); + assert_noop!(Democracy::fast_track(Origin::signed(6), h, 1, 0), Error::::InstantNotAllowed); + INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true); + assert_ok!(Democracy::fast_track(Origin::signed(6), h, 1, 0)); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 1, + proposal_hash: set_balance_proposal_hash_and_note(2), + threshold: VoteThreshold::SimpleMajority, + delay: 0, + tally: Tally { ayes: 0, nays: 0, turnout: 0 }, + }) + ); + }); +} + +#[test] +fn fast_track_referendum_fails_when_no_simple_majority() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + let h = set_balance_proposal_hash_and_note(2); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2) + )); + assert_noop!( + Democracy::fast_track(Origin::signed(5), h, 3, 2), + Error::::NotSimpleMajority + ); + }); +} diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs new file mode 100644 index 0000000000000000000000000000000000000000..e83d974a8dc27fc990950ed1ae3895946b643e03 --- /dev/null +++ b/frame/democracy/src/tests/lock_voting.rs @@ -0,0 +1,363 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The tests for functionality concerning locking and lock-voting. 
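+//!
+//! A `LockedNx` conviction multiplies the vote by N and keeps the balance locked for
+//! 2^(N-1) enactment periods after the referendum ends (EnactmentPeriod = 2 in this
+//! mock), which is where block numbers such as #6, #10, #18 and #32 below come from.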
+ +use super::*; +use std::convert::TryFrom; + +fn aye(x: u8, balance: u64) -> AccountVote { + AccountVote::Standard { + vote: Vote { aye: true, conviction: Conviction::try_from(x).unwrap() }, + balance + } +} + +fn nay(x: u8, balance: u64) -> AccountVote { + AccountVote::Standard { + vote: Vote { aye: false, conviction: Conviction::try_from(x).unwrap() }, + balance + } +} + +fn the_lock(amount: u64) -> BalanceLock { + BalanceLock { + id: DEMOCRACY_ID, + amount, + reasons: pallet_balances::Reasons::Misc, + } +} + +#[test] +fn lock_voting_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); + assert_ok!(Democracy::vote(Origin::signed(4), r, aye(2, 40))); + assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); + assert_eq!(tally(r), Tally { ayes: 250, nays: 100, turnout: 150 }); + + // All balances are currently locked. + for i in 1..=5 { + assert_eq!(Balances::locks(i), vec![the_lock(i * 10)]); + } + + fast_forward_to(2); + + // Referendum passed; 1 and 5 didn't get their way and can now reap and unlock. + assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); + assert_ok!(Democracy::unlock(Origin::signed(1), 1)); + // Anyone can reap and unlock anyone else's in this context. + assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 5, r)); + assert_ok!(Democracy::unlock(Origin::signed(2), 5)); + + // 2, 3, 4 got their way with the vote, so they cannot be reaped by others. + assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 2, r), Error::::NoPermission); + // However, they can be unvoted by the owner, though it will make no difference to the lock. + assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); + assert_ok!(Democracy::unlock(Origin::signed(2), 2)); + + assert_eq!(Balances::locks(1), vec![]); + assert_eq!(Balances::locks(2), vec![the_lock(20)]); + assert_eq!(Balances::locks(3), vec![the_lock(30)]); + assert_eq!(Balances::locks(4), vec![the_lock(40)]); + assert_eq!(Balances::locks(5), vec![]); + assert_eq!(Balances::free_balance(42), 2); + + + fast_forward_to(5); + // No change yet... + assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 4, r), Error::::NoPermission); + assert_ok!(Democracy::unlock(Origin::signed(1), 4)); + assert_eq!(Balances::locks(4), vec![the_lock(40)]); + fast_forward_to(6); + // 4 should now be able to reap and unlock + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 4, r)); + assert_ok!(Democracy::unlock(Origin::signed(1), 4)); + assert_eq!(Balances::locks(4), vec![]); + + fast_forward_to(9); + assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 3, r), Error::::NoPermission); + assert_ok!(Democracy::unlock(Origin::signed(1), 3)); + assert_eq!(Balances::locks(3), vec![the_lock(30)]); + fast_forward_to(10); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 3, r)); + assert_ok!(Democracy::unlock(Origin::signed(1), 3)); + assert_eq!(Balances::locks(3), vec![]); + + // 2 doesn't need to reap_vote here because it was already done before. 
+ fast_forward_to(17); + assert_ok!(Democracy::unlock(Origin::signed(1), 2)); + assert_eq!(Balances::locks(2), vec![the_lock(20)]); + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(1), 2)); + assert_eq!(Balances::locks(2), vec![]); + }); +} + +#[test] +fn no_locks_without_conviction_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(0, 10))); + + fast_forward_to(2); + + assert_eq!(Balances::free_balance(42), 2); + assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 1, r)); + assert_ok!(Democracy::unlock(Origin::signed(2), 1)); + assert_eq!(Balances::locks(1), vec![]); + }); +} + +#[test] +fn lock_voting_should_work_with_delegation() { + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); + assert_ok!(Democracy::delegate(Origin::signed(4), 2, Conviction::Locked2x, 40)); + assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); + + assert_eq!(tally(r), Tally { ayes: 250, nays: 100, turnout: 150 }); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 2); + }); +} + +fn setup_three_referenda() -> (u32, u32, u32) { + System::set_block_number(0); + let r1 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(5), r1, aye(4, 10))); + + let r2 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(5), r2, aye(3, 20))); + + let r3 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(5), r3, aye(2, 50))); + + fast_forward_to(2); + + (r1, r2, r3) +} + +#[test] +fn prior_lockvotes_should_be_enforced() { + new_test_ext().execute_with(|| { + let r = setup_three_referenda(); + // r.0 locked 10 until #18. + // r.1 locked 20 until #10. + // r.2 locked 50 until #6. 
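+		// (Voting ended at block 2; Locked4x/3x/2x hold the lock for a further 16/8/4
+		// blocks respectively, hence #18, #10 and #6.)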
+ + fast_forward_to(5); + assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2), Error::::NoPermission); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(50)]); + fast_forward_to(6); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(20)]); + fast_forward_to(9); + assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1), Error::::NoPermission); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(20)]); + fast_forward_to(10); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(10)]); + fast_forward_to(17); + assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0), Error::::NoPermission); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(10)]); + fast_forward_to(18); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); +} + +#[test] +fn single_consolidation_of_lockvotes_should_work_as_before() { + new_test_ext().execute_with(|| { + let r = setup_three_referenda(); + // r.0 locked 10 until #18. + // r.1 locked 20 until #10. + // r.2 locked 50 until #6. + + fast_forward_to(5); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(50)]); + fast_forward_to(6); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(20)]); + + fast_forward_to(9); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(20)]); + fast_forward_to(10); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(10)]); + + fast_forward_to(17); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(10)]); + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); +} + +#[test] +fn multi_consolidation_of_lockvotes_should_be_conservative() { + new_test_ext().execute_with(|| { + let r = setup_three_referenda(); + // r.0 locked 10 until #18. + // r.1 locked 20 until #10. + // r.2 locked 50 until #6. 
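+		// All three votes are removed up front; each `unlock` may only shrink the lock,
+		// and it must still cover every vote whose lock period has not yet expired,
+		// hence the `>=` checks below rather than exact amounts.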
+ + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); + + fast_forward_to(6); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 20); + + fast_forward_to(10); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 10); + + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); +} + +#[test] +fn locks_should_persist_from_voting_to_delegation() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(5), r, aye(4, 10))); + fast_forward_to(2); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); + // locked 10 until #18. + + assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked3x, 20)); + // locked 20. + assert!(Balances::locks(5)[0].amount == 20); + + assert_ok!(Democracy::undelegate(Origin::signed(5))); + // locked 20 until #10 + + fast_forward_to(9); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount == 20); + + fast_forward_to(10); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 10); + + fast_forward_to(17); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 10); + + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); +} + +#[test] +fn locks_should_persist_from_delegation_to_voting() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked5x, 5)); + assert_ok!(Democracy::undelegate(Origin::signed(5))); + // locked 5 until #32 + + let r = setup_three_referenda(); + // r.0 locked 10 until #18. + // r.1 locked 20 until #10. + // r.2 locked 50 until #6. + + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); + + fast_forward_to(6); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 20); + + fast_forward_to(10); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 10); + + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 5); + + fast_forward_to(32); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); +} diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs new file mode 100644 index 0000000000000000000000000000000000000000..7d977b0ba83acd514e73fb77d18c485269451161 --- /dev/null +++ b/frame/democracy/src/tests/preimage.rs @@ -0,0 +1,157 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The preimage tests. + +use super::*; + +#[test] +fn missing_preimage_should_fail() { + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 0); + }); +} + +#[test] +fn preimage_deposit_should_be_required_and_returned() { + new_test_ext().execute_with(|| { + // fee of 100 is too much. + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); + assert_noop!( + Democracy::note_preimage(Origin::signed(6), vec![0; 500]), + BalancesError::::InsufficientBalance, + ); + // fee of 1 is reasonable. + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + assert_eq!(Balances::reserved_balance(6), 12); + + next_block(); + next_block(); + + assert_eq!(Balances::reserved_balance(6), 0); + assert_eq!(Balances::free_balance(6), 60); + assert_eq!(Balances::free_balance(42), 2); + }); +} + +#[test] +fn preimage_deposit_should_be_reapable_earlier_by_owner() { + new_test_ext().execute_with(|| { + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + assert_ok!(Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2))); + + assert_eq!(Balances::reserved_balance(6), 12); + + next_block(); + assert_noop!( + Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2)), + Error::::TooEarly + ); + next_block(); + assert_ok!(Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2))); + + assert_eq!(Balances::free_balance(6), 60); + assert_eq!(Balances::reserved_balance(6), 0); + }); +} + +#[test] +fn preimage_deposit_should_be_reapable() { + new_test_ext().execute_with(|| { + assert_noop!( + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2)), + Error::::PreimageMissing + ); + + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + assert_ok!(Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2))); + assert_eq!(Balances::reserved_balance(6), 12); + + next_block(); + next_block(); + next_block(); + assert_noop!( + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2)), + Error::::TooEarly + ); + + next_block(); + assert_ok!(Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2))); + assert_eq!(Balances::reserved_balance(6), 0); + assert_eq!(Balances::free_balance(6), 48); + assert_eq!(Balances::free_balance(5), 62); + }); +} + +#[test] +fn noting_imminent_preimage_for_free_should_work() { + new_test_ext().execute_with(|| { + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash(2), + VoteThreshold::SuperMajorityApprove, + 1 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + assert_noop!( + Democracy::note_imminent_preimage(Origin::signed(7), set_balance_proposal(2)), + Error::::NotImminent + ); + + next_block(); + + // Now 
we're in the dispatch queue it's all good. + assert_ok!(Democracy::note_imminent_preimage(Origin::signed(7), set_balance_proposal(2))); + + next_block(); + + assert_eq!(Balances::free_balance(42), 2); + }); +} + +#[test] +fn reaping_imminent_preimage_should_fail() { + new_test_ext().execute_with(|| { + let h = set_balance_proposal_hash_and_note(2); + let r = Democracy::inject_referendum(3, h, VoteThreshold::SuperMajorityApprove, 1); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + next_block(); + next_block(); + assert_noop!(Democracy::reap_preimage(Origin::signed(6), h), Error::::Imminent); + }); +} diff --git a/frame/democracy/src/tests/proxying.rs b/frame/democracy/src/tests/proxying.rs new file mode 100644 index 0000000000000000000000000000000000000000..412adf6be03e707a22e9cd3bb03df3c8f2e6c6cd --- /dev/null +++ b/frame/democracy/src/tests/proxying.rs @@ -0,0 +1,104 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The tests for functionality concerning proxying. + +use super::*; + +#[test] +fn proxy_should_work() { + new_test_ext().execute_with(|| { + assert_eq!(Democracy::proxy(10), None); + assert!(System::allow_death(&10)); + + assert_noop!(Democracy::activate_proxy(Origin::signed(1), 10), Error::::NotOpen); + + assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); + assert!(!System::allow_death(&10)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); + + assert_noop!(Democracy::activate_proxy(Origin::signed(2), 10), Error::::WrongOpen); + assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); + + // Can't set when already set. + assert_noop!(Democracy::activate_proxy(Origin::signed(2), 10), Error::::AlreadyProxy); + + // But this works because 11 isn't proxying. + assert_ok!(Democracy::open_proxy(Origin::signed(11), 2)); + assert_ok!(Democracy::activate_proxy(Origin::signed(2), 11)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); + assert_eq!(Democracy::proxy(11), Some(ProxyState::Active(2))); + + // 2 cannot fire 1's proxy: + assert_noop!(Democracy::deactivate_proxy(Origin::signed(2), 10), Error::::WrongProxy); + + // 1 deactivates their proxy: + assert_ok!(Democracy::deactivate_proxy(Origin::signed(1), 10)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); + // but the proxy account cannot be killed until the proxy is closed. + assert!(!System::allow_death(&10)); + + // and then 10 closes it completely: + assert_ok!(Democracy::close_proxy(Origin::signed(10))); + assert_eq!(Democracy::proxy(10), None); + assert!(System::allow_death(&10)); + + // 11 just closes without 2's "permission". 
+ assert_ok!(Democracy::close_proxy(Origin::signed(11))); + assert_eq!(Democracy::proxy(11), None); + assert!(System::allow_death(&11)); + }); +} + +#[test] +fn voting_and_removing_votes_should_work_with_proxy() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + fast_forward_to(2); + let r = 0; + assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); + assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); + + assert_ok!(Democracy::proxy_vote(Origin::signed(10), r, aye(1))); + assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); + + assert_ok!(Democracy::proxy_remove_vote(Origin::signed(10), r)); + assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); + }); +} + +#[test] +fn delegation_and_undelegation_should_work_with_proxy() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + fast_forward_to(2); + let r = 0; + assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); + assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); + + assert_ok!(Democracy::proxy_delegate(Origin::signed(10), 2, Conviction::None, 10)); + assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); + + assert_ok!(Democracy::proxy_undelegate(Origin::signed(10))); + assert_eq!(tally(r), Tally { ayes: 2, nays: 0, turnout: 20 }); + }); +} + diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs new file mode 100644 index 0000000000000000000000000000000000000000..04246e86f1da4fb530fb47a58bb9fbb07120bc43 --- /dev/null +++ b/frame/democracy/src/tests/public_proposals.rs @@ -0,0 +1,98 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The tests for the public proposal queue. 
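+//!
+//! Proposing reserves the deposit given as the last argument of
+//! `propose_set_balance_and_note`, every `second` reserves the same amount again, and
+//! all deposits are returned once the proposal is tabled as a referendum.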
+ +use super::*; + +#[test] +fn backing_for_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(propose_set_balance_and_note(1, 2, 2)); + assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_ok!(propose_set_balance_and_note(1, 3, 3)); + assert_eq!(Democracy::backing_for(0), Some(2)); + assert_eq!(Democracy::backing_for(1), Some(4)); + assert_eq!(Democracy::backing_for(2), Some(3)); + }); +} + +#[test] +fn deposit_for_proposals_should_be_taken() { + new_test_ext().execute_with(|| { + assert_ok!(propose_set_balance_and_note(1, 2, 5)); + assert_ok!(Democracy::second(Origin::signed(2), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::free_balance(2), 15); + assert_eq!(Balances::free_balance(5), 35); + }); +} + +#[test] +fn deposit_for_proposals_should_be_returned() { + new_test_ext().execute_with(|| { + assert_ok!(propose_set_balance_and_note(1, 2, 5)); + assert_ok!(Democracy::second(Origin::signed(2), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + fast_forward_to(3); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 20); + assert_eq!(Balances::free_balance(5), 50); + }); +} + +#[test] +fn proposal_with_deposit_below_minimum_should_not_work() { + new_test_ext().execute_with(|| { + assert_noop!(propose_set_balance(1, 2, 0), Error::::ValueLow); + }); +} + +#[test] +fn poor_proposer_should_not_work() { + new_test_ext().execute_with(|| { + assert_noop!(propose_set_balance(1, 2, 11), BalancesError::::InsufficientBalance); + }); +} + +#[test] +fn poor_seconder_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(propose_set_balance_and_note(2, 2, 11)); + assert_noop!(Democracy::second(Origin::signed(1), 0), BalancesError::::InsufficientBalance); + }); +} + +#[test] +fn runners_up_should_come_after() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 2)); + assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_ok!(propose_set_balance_and_note(1, 3, 3)); + fast_forward_to(2); + assert_ok!(Democracy::vote(Origin::signed(1), 0, aye(1))); + fast_forward_to(4); + assert_ok!(Democracy::vote(Origin::signed(1), 1, aye(1))); + fast_forward_to(6); + assert_ok!(Democracy::vote(Origin::signed(1), 2, aye(1))); + }); +} diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs new file mode 100644 index 0000000000000000000000000000000000000000..db9724deddc223d8895ce807f91ce65e9c020a8b --- /dev/null +++ b/frame/democracy/src/tests/scheduling.rs @@ -0,0 +1,111 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The tests for functionality concerning normal starting, ending and enacting of referenda. + +use super::*; + +#[test] +fn simple_passing_should_work() { + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); + next_block(); + next_block(); + assert_eq!(Balances::free_balance(42), 2); + }); +} + +#[test] +fn simple_failing_should_work() { + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); + assert_eq!(tally(r), Tally { ayes: 0, nays: 1, turnout: 10 }); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 0); + }); +} + +#[test] +fn ooo_inject_referendums_should_work() { + new_test_ext().execute_with(|| { + let r1 = Democracy::inject_referendum( + 3, + set_balance_proposal_hash_and_note(3), + VoteThreshold::SuperMajorityApprove, + 0 + ); + let r2 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + + assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); + assert_eq!(tally(r2), Tally { ayes: 1, nays: 0, turnout: 10 }); + + next_block(); + assert_eq!(Balances::free_balance(42), 2); + + assert_ok!(Democracy::vote(Origin::signed(1), r1, aye(1))); + assert_eq!(tally(r1), Tally { ayes: 1, nays: 0, turnout: 10 }); + + next_block(); + assert_eq!(Balances::free_balance(42), 3); + }); +} + +#[test] +fn delayed_enactment_should_work() { + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 1 + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); + assert_ok!(Democracy::vote(Origin::signed(4), r, aye(4))); + assert_ok!(Democracy::vote(Origin::signed(5), r, aye(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, aye(6))); + + assert_eq!(tally(r), Tally { ayes: 21, nays: 0, turnout: 210 }); + + next_block(); + assert_eq!(Balances::free_balance(42), 0); + + next_block(); + assert_eq!(Balances::free_balance(42), 2); + }); +} diff --git a/frame/democracy/src/tests/voting.rs b/frame/democracy/src/tests/voting.rs new file mode 100644 index 0000000000000000000000000000000000000000..43aed29a32d8bc8c0cc59ab0d84b7fcef44dd4aa --- /dev/null +++ b/frame/democracy/src/tests/voting.rs @@ -0,0 +1,165 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The tests for normal voting functionality. + +use super::*; + +#[test] +fn overvoting_should_fail() { + new_test_ext().execute_with(|| { + let r = begin_referendum(); + assert_noop!(Democracy::vote(Origin::signed(1), r, aye(2)), Error::::InsufficientFunds); + }); +} + +#[test] +fn split_voting_should_work() { + new_test_ext().execute_with(|| { + let r = begin_referendum(); + let v = AccountVote::Split { aye: 40, nay: 20 }; + assert_noop!(Democracy::vote(Origin::signed(5), r, v), Error::::InsufficientFunds); + let v = AccountVote::Split { aye: 30, nay: 20 }; + assert_ok!(Democracy::vote(Origin::signed(5), r, v)); + + assert_eq!(tally(r), Tally { ayes: 3, nays: 2, turnout: 50 }); + }); +} + +#[test] +fn split_vote_cancellation_should_work() { + new_test_ext().execute_with(|| { + let r = begin_referendum(); + let v = AccountVote::Split { aye: 30, nay: 20 }; + assert_ok!(Democracy::vote(Origin::signed(5), r, v)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); + assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); +} + +#[test] +fn single_proposal_should_work() { + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + let r = 0; + assert!(Democracy::referendum_info(r).is_none()); + + // start of 2 => next referendum scheduled. + fast_forward_to(2); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + assert_eq!(Democracy::referendum_count(), 1); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash_and_note(2), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { ayes: 1, nays: 0, turnout: 10 }, + }) + ); + + fast_forward_to(3); + + // referendum still running + assert!(Democracy::referendum_status(0).is_ok()); + + // referendum runs during 2 and 3, ends @ start of 4. + fast_forward_to(4); + + assert!(Democracy::referendum_status(0).is_err()); + assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); + + // referendum passes and wait another two blocks for enactment. 
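+		// (Enactment was scheduled for block 6: voting ended at block 4 plus the 2-block
+		// enactment delay, which is why the scheduler agenda for block 6 is non-empty.)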
+ fast_forward_to(6); + + assert_eq!(Balances::free_balance(42), 2); + }); +} + +#[test] +fn controversial_voting_should_work() { + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + + assert_ok!(Democracy::vote(Origin::signed(1), r, big_aye(1))); + assert_ok!(Democracy::vote(Origin::signed(2), r, big_nay(2))); + assert_ok!(Democracy::vote(Origin::signed(3), r, big_nay(3))); + assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); + assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); + + assert_eq!(tally(r), Tally { ayes: 110, nays: 100, turnout: 210 }); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 2); + }); +} + +#[test] +fn controversial_low_turnout_voting_should_work() { + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); + + assert_eq!(tally(r), Tally { ayes: 60, nays: 50, turnout: 110 }); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 0); + }); +} + +#[test] +fn passing_low_turnout_voting_should_work() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_eq!(Balances::total_issuance(), 210); + + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0 + ); + assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); + assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); + assert_eq!(tally(r), Tally { ayes: 100, nays: 50, turnout: 150 }); + + next_block(); + next_block(); + assert_eq!(Balances::free_balance(42), 2); + }); +} diff --git a/frame/democracy/src/types.rs b/frame/democracy/src/types.rs new file mode 100644 index 0000000000000000000000000000000000000000..3454326364de60412048467b8602df796110e3fd --- /dev/null +++ b/frame/democracy/src/types.rs @@ -0,0 +1,224 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Miscellaneous additional datatypes. + +use codec::{Encode, Decode}; +use sp_runtime::RuntimeDebug; +use sp_runtime::traits::{Zero, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, Saturating}; +use crate::{Vote, VoteThreshold, AccountVote, Conviction}; + +/// Info regarding an ongoing referendum. +#[derive(Encode, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct Tally { + /// The number of aye votes, expressed in terms of post-conviction lock-vote. 
+ pub (crate) ayes: Balance, + /// The number of nay votes, expressed in terms of post-conviction lock-vote. + pub (crate) nays: Balance, + /// The amount of funds currently expressing its opinion. Pre-conviction. + pub (crate) turnout: Balance, +} + +/// Amount of votes and capital placed in delegation for an account. +#[derive(Encode, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct Delegations { + /// The number of votes (this is post-conviction). + pub (crate) votes: Balance, + /// The amount of raw capital, used for the turnout. + pub (crate) capital: Balance, +} + +impl Saturating for Delegations { + fn saturating_add(self, o: Self) -> Self { + Self { + votes: self.votes.saturating_add(o.votes), + capital: self.capital.saturating_add(o.capital), + } + } + + fn saturating_sub(self, o: Self) -> Self { + Self { + votes: self.votes.saturating_sub(o.votes), + capital: self.capital.saturating_sub(o.capital), + } + } + + fn saturating_mul(self, o: Self) -> Self { + Self { + votes: self.votes.saturating_mul(o.votes), + capital: self.capital.saturating_mul(o.capital), + } + } + + fn saturating_pow(self, exp: usize) -> Self { + Self { + votes: self.votes.saturating_pow(exp), + capital: self.capital.saturating_pow(exp), + } + } +} + +impl< + Balance: From + Zero + Copy + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Bounded + + Saturating +> Tally { + /// Create a new tally. + pub fn new( + vote: Vote, + balance: Balance, + ) -> Self { + let Delegations { votes, capital } = vote.conviction.votes(balance); + Self { + ayes: if vote.aye { votes } else { Zero::zero() }, + nays: if vote.aye { Zero::zero() } else { votes }, + turnout: capital, + } + } + + /// Add an account's vote into the tally. + pub fn add( + &mut self, + vote: AccountVote, + ) -> Option<()> { + match vote { + AccountVote::Standard { vote, balance } => { + let Delegations { votes, capital } = vote.conviction.votes(balance); + self.turnout = self.turnout.checked_add(&capital)?; + match vote.aye { + true => self.ayes = self.ayes.checked_add(&votes)?, + false => self.nays = self.nays.checked_add(&votes)?, + } + } + AccountVote::Split { aye, nay } => { + let aye = Conviction::None.votes(aye); + let nay = Conviction::None.votes(nay); + self.turnout = self.turnout.checked_add(&aye.capital)?.checked_add(&nay.capital)?; + self.ayes = self.ayes.checked_add(&aye.votes)?; + self.nays = self.nays.checked_add(&nay.votes)?; + } + } + Some(()) + } + + /// Remove an account's vote from the tally. + pub fn remove( + &mut self, + vote: AccountVote, + ) -> Option<()> { + match vote { + AccountVote::Standard { vote, balance } => { + let Delegations { votes, capital } = vote.conviction.votes(balance); + self.turnout = self.turnout.checked_sub(&capital)?; + match vote.aye { + true => self.ayes = self.ayes.checked_sub(&votes)?, + false => self.nays = self.nays.checked_sub(&votes)?, + } + } + AccountVote::Split { aye, nay } => { + let aye = Conviction::None.votes(aye); + let nay = Conviction::None.votes(nay); + self.turnout = self.turnout.checked_sub(&aye.capital)?.checked_sub(&nay.capital)?; + self.ayes = self.ayes.checked_sub(&aye.votes)?; + self.nays = self.nays.checked_sub(&nay.votes)?; + } + } + Some(()) + } + + /// Increment some amount of votes. 
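`Tally::add` and `Tally::remove` return `Option<()>` and use checked arithmetic throughout, so an overflowing or underflowing update surfaces as `None` for the caller to turn into an error rather than wrapping silently. A minimal sketch of that pattern with plain integers:

// Minimal sketch of the checked-arithmetic pattern used by Tally::add/remove:
// every step uses checked_add/checked_sub plus `?`, so any overflow turns the
// whole operation into `None` at the failing step.
fn add_standard(ayes: &mut u64, turnout: &mut u64, votes: u64, capital: u64) -> Option<()> {
    *turnout = turnout.checked_add(capital)?;
    *ayes = ayes.checked_add(votes)?;
    Some(())
}

fn main() {
    let (mut ayes, mut turnout) = (0u64, 0u64);
    assert!(add_standard(&mut ayes, &mut turnout, 10, 100).is_some());
    // An overflowing addition is rejected rather than wrapping.
    assert!(add_standard(&mut ayes, &mut turnout, u64::MAX, u64::MAX).is_none());
}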
+ pub fn increase(&mut self, approve: bool, delegations: Delegations) -> Option<()> { + self.turnout = self.turnout.saturating_add(delegations.capital); + match approve { + true => self.ayes = self.ayes.saturating_add(delegations.votes), + false => self.nays = self.nays.saturating_add(delegations.votes), + } + Some(()) + } + + /// Decrement some amount of votes. + pub fn reduce(&mut self, approve: bool, delegations: Delegations) -> Option<()> { + self.turnout = self.turnout.saturating_sub(delegations.capital); + match approve { + true => self.ayes = self.ayes.saturating_sub(delegations.votes), + false => self.nays = self.nays.saturating_sub(delegations.votes), + } + Some(()) + } +} + +/// Info regarding an ongoing referendum. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub struct ReferendumStatus { + /// When voting on this referendum will end. + pub (crate) end: BlockNumber, + /// The hash of the proposal being voted on. + pub (crate) proposal_hash: Hash, + /// The thresholding mechanism to determine whether it passed. + pub (crate) threshold: VoteThreshold, + /// The delay (in blocks) to wait after a successful referendum before deploying. + pub (crate) delay: BlockNumber, + /// The current tally of votes in this referendum. + pub (crate) tally: Tally, +} + +/// Info regarding a referendum, present or past. +#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] +pub enum ReferendumInfo { + /// Referendum is happening, the arg is the block number at which it will end. + Ongoing(ReferendumStatus), + /// Referendum finished at `end`, and has been `approved` or rejected. + Finished{approved: bool, end: BlockNumber}, +} + +impl ReferendumInfo { + /// Create a new instance. + pub fn new( + end: BlockNumber, + proposal_hash: Hash, + threshold: VoteThreshold, + delay: BlockNumber, + ) -> Self { + let s = ReferendumStatus{ end, proposal_hash, threshold, delay, tally: Tally::default() }; + ReferendumInfo::Ongoing(s) + } +} + +/// State of a proxy voting account. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] +pub enum ProxyState { + /// Account is open to becoming a proxy but is not yet assigned. + Open(AccountId), + /// Account is actively being a proxy. + Active(AccountId), +} + +impl ProxyState { + pub (crate) fn as_active(self) -> Option { + match self { + ProxyState::Active(a) => Some(a), + ProxyState::Open(_) => None, + } + } +} + +/// Whether an `unvote` operation is able to make actions that are not strictly always in the +/// interest of an account. +pub enum UnvoteScope { + /// Permitted to do everything. + Any, + /// Permitted to do only the changes that do not need the owner's permission. + OnlyExpired, +} diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs new file mode 100644 index 0000000000000000000000000000000000000000..a41eb342aa12cb1ee862a0119a49f97620a0620d --- /dev/null +++ b/frame/democracy/src/vote.rs @@ -0,0 +1,181 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
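`ReferendumInfo` captures the referendum lifecycle: a freshly injected referendum is `Ongoing` with a default tally, and once voting closes only the compact `Finished { approved, end }` record is kept. A toy sketch of that transition, using a bare simple-majority check for brevity where the pallet would apply the configured `VoteThreshold`:

// Sketch of the Ongoing -> Finished transition encoded by ReferendumInfo.
#[derive(Debug, PartialEq)]
enum MiniInfo {
    Ongoing { end: u64, ayes: u64, nays: u64 },
    Finished { approved: bool, end: u64 },
}

fn finish(info: MiniInfo) -> MiniInfo {
    match info {
        // Simple-majority stand-in for the configured threshold.
        MiniInfo::Ongoing { end, ayes, nays } => MiniInfo::Finished { approved: ayes > nays, end },
        finished => finished,
    }
}

fn main() {
    let info = MiniInfo::Ongoing { end: 4, ayes: 3, nays: 2 };
    assert_eq!(finish(info), MiniInfo::Finished { approved: true, end: 4 });
}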
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! The vote datatype. + +use sp_std::{prelude::*, result::Result, convert::TryFrom}; +use codec::{Encode, EncodeLike, Decode, Output, Input}; +use sp_runtime::{RuntimeDebug, traits::{Saturating, Zero}}; +use crate::{Conviction, ReferendumIndex, Delegations}; + +/// A number of lock periods, plus a vote, one way or the other. +#[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug)] +pub struct Vote { + pub aye: bool, + pub conviction: Conviction, +} + +impl Encode for Vote { + fn encode_to(&self, output: &mut T) { + output.push_byte(u8::from(self.conviction) | if self.aye { 0b1000_0000 } else { 0 }); + } +} + +impl EncodeLike for Vote {} + +impl Decode for Vote { + fn decode(input: &mut I) -> Result { + let b = input.read_byte()?; + Ok(Vote { + aye: (b & 0b1000_0000) == 0b1000_0000, + conviction: Conviction::try_from(b & 0b0111_1111) + .map_err(|_| codec::Error::from("Invalid conviction"))?, + }) + } +} + +/// A vote for a referendum of a particular account. +#[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug)] +pub enum AccountVote { + /// A standard vote, one-way (approve or reject) with a given amount of conviction. + Standard { vote: Vote, balance: Balance }, + /// A split vote with balances given for both ways, and with no conviction, useful for + /// parachains when voting. + Split { aye: Balance, nay: Balance }, +} + +impl AccountVote { + /// Returns `Some` of the lock periods that the account is locked for, assuming that the + /// referendum passed iff `approved` is `true`. + pub fn locked_if(self, approved: bool) -> Option<(u32, Balance)> { + // winning side: can only be removed after the lock period ends. + match self { + AccountVote::Standard { vote, balance } if vote.aye == approved => + Some((vote.conviction.lock_periods(), balance)), + _ => None, + } + } + + /// The total balance involved in this vote. + pub fn balance(self) -> Balance { + match self { + AccountVote::Standard { balance, .. } => balance, + AccountVote::Split { aye, nay } => aye.saturating_add(nay), + } + } + + /// Returns `Some` with whether the vote is an aye vote if it is standard, otherwise `None` if + /// it is split. + pub fn as_standard(self) -> Option { + match self { + AccountVote::Standard { vote, .. } => Some(vote.aye), + _ => None, + } + } +} + +/// A "prior" lock, i.e. a lock for some now-forgotten reason. +#[derive(Encode, Decode, Default, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] +pub struct PriorLock(BlockNumber, Balance); + +impl PriorLock { + /// Accumulates an additional lock. + pub fn accumulate(&mut self, until: BlockNumber, amount: Balance) { + self.0 = self.0.max(until); + self.1 = self.1.max(amount); + } + + pub fn locked(&self) -> Balance { + self.1 + } + + pub fn rejig(&mut self, now: BlockNumber) { + if now >= self.0 { + self.0 = Zero::zero(); + self.1 = Zero::zero(); + } + } +} + +/// An indicator for what an account is doing; it can either be delegating or voting. +#[derive(Encode, Decode, Clone, Eq, PartialEq, RuntimeDebug)] +pub enum Voting { + /// The account is voting directly. `delegations` is the total amount of post-conviction voting + /// weight that it controls from those that have delegated to it. + Direct { + /// The current votes of the account. + votes: Vec<(ReferendumIndex, AccountVote)>, + /// The total amount of delegations that this account has received. 
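The hand-written `Encode`/`Decode` impls pack a `Vote` into a single byte: the top bit carries `aye` and the low seven bits carry the conviction, with unknown conviction values rejected on decode. A standalone round-trip sketch, assuming conviction discriminants `0..=6`:

// Round-trip sketch of the one-byte Vote encoding: bit 7 = aye,
// bits 0..=6 = conviction (assumed to range over 0 = None .. 6 = Locked6x).
fn encode_vote(aye: bool, conviction: u8) -> u8 {
    conviction | if aye { 0b1000_0000 } else { 0 }
}

fn decode_vote(b: u8) -> Option<(bool, u8)> {
    let conviction = b & 0b0111_1111;
    if conviction > 6 { return None; } // mirrors Conviction::try_from failing
    Some((b & 0b1000_0000 != 0, conviction))
}

fn main() {
    let byte = encode_vote(true, 3);
    assert_eq!(byte, 0b1000_0011);
    assert_eq!(decode_vote(byte), Some((true, 3)));
    assert_eq!(decode_vote(0b0111_1111), None); // out-of-range conviction
}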
+ delegations: Delegations, + /// Any pre-existing locks from past voting/delegating activity. + prior: PriorLock, + }, + /// The account is delegating `balance` of its balance to a `target` account with `conviction`. + Delegating { + balance: Balance, + target: AccountId, + conviction: Conviction, + /// The total amount of delegations that this account has received. + delegations: Delegations, + /// Any pre-existing locks from past voting/delegating activity. + prior: PriorLock, + }, +} + +impl Default for Voting { + fn default() -> Self { + Voting::Direct { + votes: Vec::new(), + delegations: Default::default(), + prior: PriorLock(Zero::zero(), Default::default()), + } + } +} + +impl< + Balance: Saturating + Ord + Zero + Copy, + BlockNumber: Ord + Copy + Zero, + AccountId, +> Voting { + pub fn rejig(&mut self, now: BlockNumber) { + match self { + Voting::Direct { prior, .. } => prior, + Voting::Delegating { prior, .. } => prior, + }.rejig(now); + } + + /// The amount of this account's balance that much currently be locked due to voting. + pub fn locked_balance(&self) -> Balance { + match self { + Voting::Direct { votes, prior, .. } => votes.iter() + .map(|i| i.1.balance()) + .fold(prior.locked(), |a, i| a.max(i)), + Voting::Delegating { balance, .. } => *balance, + } + } + + pub fn set_common(&mut self, + delegations: Delegations, + prior: PriorLock + ) { + let (d, p) = match self { + Voting::Direct { ref mut delegations, ref mut prior, .. } => (delegations, prior), + Voting::Delegating { ref mut delegations, ref mut prior, .. } => (delegations, prior), + }; + *d = delegations; + *p = prior; + } +} diff --git a/frame/democracy/src/vote_threshold.rs b/frame/democracy/src/vote_threshold.rs index 46612af09abc5fd161e4583e31b9c616782f602f..fd976b44001cf189d497ec81c9fba789843e72d0 100644 --- a/frame/democracy/src/vote_threshold.rs +++ b/frame/democracy/src/vote_threshold.rs @@ -21,6 +21,7 @@ use serde::{Serialize, Deserialize}; use codec::{Encode, Decode}; use sp_runtime::traits::{Zero, IntegerSquareRoot}; use sp_std::ops::{Add, Mul, Div, Rem}; +use crate::Tally; /// A means of determining if a vote is past pass threshold. #[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug)] @@ -35,10 +36,9 @@ pub enum VoteThreshold { } pub trait Approved { - /// Given `approve` votes for and `against` votes against from a total electorate size of - /// `electorate` (`electorate - (approve + against)` are abstainers), then returns true if the - /// overall outcome is in favor of approval. - fn approved(&self, approve: Balance, against: Balance, voters: Balance, electorate: Balance) -> bool; + /// Given a `tally` of votes and a total size of `electorate`, this returns `true` if the + /// overall outcome is in favor of approval according to `self`'s threshold method. + fn approved(&self, tally: Tally, electorate: Balance) -> bool; } /// Return `true` iff `n1 / d1 < n2 / d2`. `d1` and `d2` may not be zero. @@ -69,23 +69,21 @@ fn compare_rationals + Div + Rem + Mul + Div + Rem + Copy> Approved for VoteThreshold { - /// Given `approve` votes for and `against` votes against from a total electorate size of - /// `electorate` of whom `voters` voted (`electorate - voters` are abstainers) then returns true if the - /// overall outcome is in favor of approval. - /// - /// We assume each *voter* may cast more than one *vote*, hence `voters` is not necessarily equal to - /// `approve + against`. 
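The reworked `Approved::approved(tally, electorate)` keeps the turnout-biased rule: `SuperMajorityApprove` passes only when `nays / sqrt(turnout) < ayes / sqrt(electorate)`, so a low turnout raises the bar for approval. A worked check of the two cases exercised in the vote_threshold tests, with plain cross-multiplication standing in for the pallet's overflow-aware `compare_rationals`:

// Worked check of the turnout-biased "super-majority approve" rule:
//   approve iff  nays / sqrt(turnout)  <  ayes / sqrt(electorate)
// using integer square roots.
fn isqrt(n: u64) -> u64 {
    (0..=n).take_while(|i| i * i <= n).last().unwrap_or(0)
}

fn super_majority_approve(ayes: u64, nays: u64, turnout: u64, electorate: u64) -> bool {
    let (sv, se) = (isqrt(turnout), isqrt(electorate));
    if sv == 0 { return false; }
    // nays / sv < ayes / se, compared by cross-multiplication.
    nays * se < ayes * sv
}

fn main() {
    // Fails: sqrt(110) = 10, sqrt(210) = 14, and 50/10 is not below 60/14.
    assert!(!super_majority_approve(60, 50, 110, 210));
    // Passes: sqrt(150) = 12, and 50/12 is below 100/14.
    assert!(super_majority_approve(100, 50, 150, 210));
}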
- fn approved(&self, approve: Balance, against: Balance, voters: Balance, electorate: Balance) -> bool { - let sqrt_voters = voters.integer_sqrt(); +impl< + Balance: IntegerSquareRoot + Zero + Ord + Add + + Mul + Div + + Rem + Copy, +> Approved for VoteThreshold { + fn approved(&self, tally: Tally, electorate: Balance) -> bool { + let sqrt_voters = tally.turnout.integer_sqrt(); let sqrt_electorate = electorate.integer_sqrt(); if sqrt_voters.is_zero() { return false; } match *self { VoteThreshold::SuperMajorityApprove => - compare_rationals(against, sqrt_voters, approve, sqrt_electorate), + compare_rationals(tally.nays, sqrt_voters, tally.ayes, sqrt_electorate), VoteThreshold::SuperMajorityAgainst => - compare_rationals(against, sqrt_electorate, approve, sqrt_voters), - VoteThreshold::SimpleMajority => approve > against, + compare_rationals(tally.nays, sqrt_electorate, tally.ayes, sqrt_voters), + VoteThreshold::SimpleMajority => tally.ayes > tally.nays, } } } @@ -96,7 +94,7 @@ mod tests { #[test] fn should_work() { - assert_eq!(VoteThreshold::SuperMajorityApprove.approved(60, 50, 110, 210), false); - assert_eq!(VoteThreshold::SuperMajorityApprove.approved(100, 50, 150, 210), true); + assert!(!VoteThreshold::SuperMajorityApprove.approved(Tally{ayes: 60, nays: 50, turnout: 110}, 210)); + assert!(VoteThreshold::SuperMajorityApprove.approved(Tally{ayes: 100, nays: 50, turnout: 150}, 210)); } } diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index 62ae091e9a41dea8fa039e48d329d571659b3852..fb219bbebc33d5a864e3d0beee08e32e07e2eab5 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections-phragmen" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,24 +9,26 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME election pallet for PHRAGMEN" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-phragmen = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/phragmen" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +serde = { version = "1.0.101", optional = true } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-phragmen = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/phragmen" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-io = { version = "2.0.0-alpha.5", path = "../../primitives/io" } hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } -sp-core = { version = 
"2.0.0-alpha.2", path = "../../primitives/core" } -substrate-test-utils = { version = "2.0.0-alpha.2", path = "../../test-utils" } -serde = { version = "1.0.101" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } +pallet-scheduler = { version = "2.0.0-alpha.5", path = "../scheduler" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +substrate-test-utils = { version = "2.0.0-alpha.5", path = "../../test-utils" } [features] default = ["std"] std = [ + "serde", "codec/std", "frame-support/std", "sp-runtime/std", @@ -34,3 +36,7 @@ std = [ "frame-system/std", "sp-std/std", ] +runtime-benchmarks = ["frame-support/runtime-benchmarks"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 55b7b3f1280d30df19545143cb7a8972df52f71c..036a5f492c19c94c9f4ca980bb1de0846074bb37 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -84,17 +84,17 @@ use sp_std::prelude::*; use sp_runtime::{ - print, DispatchResult, DispatchError, Perbill, - traits::{Zero, StaticLookup, Convert}, + print, DispatchResult, DispatchError, Perbill, traits::{Zero, StaticLookup, Convert}, }; use frame_support::{ - decl_storage, decl_event, ensure, decl_module, decl_error, weights::SimpleDispatchInfo, + decl_storage, decl_event, ensure, decl_module, decl_error, + weights::{SimpleDispatchInfo, Weight, WeighData}, storage::{StorageMap, IterableStorageMap}, traits::{ Currency, Get, LockableCurrency, LockIdentifier, ReservableCurrency, WithdrawReasons, - ChangeMembers, OnUnbalanced, WithdrawReason, Contains, BalanceStatus + ChangeMembers, OnUnbalanced, WithdrawReason, Contains, BalanceStatus, InitializeMembers, } }; -use sp_phragmen::ExtendedBalance; +use sp_phragmen::{build_support_map, ExtendedBalance}; use frame_system::{self as system, ensure_signed, ensure_root}; const MODULE_ID: LockIdentifier = *b"phrelect"; @@ -118,6 +118,9 @@ pub trait Trait: frame_system::Trait { /// What to do when the members change. type ChangeMembers: ChangeMembers; + /// What to do with genesis members + type InitializeMembers: InitializeMembers; + /// Convert a balance into a number used for election calculation. /// This must fit into a `u64` but is allowed to be sensibly lossy. type CurrencyToVote: Convert, u64> + Convert>; @@ -159,14 +162,49 @@ decl_storage! { /// The total number of vote rounds that have happened, excluding the upcoming one. pub ElectionRounds get(fn election_rounds): u32 = Zero::zero(); - /// Votes of a particular voter, with the round index of the votes. - pub VotesOf get(fn votes_of): linked_map hasher(blake2_256) T::AccountId => Vec; - /// Locked stake of a voter. - pub StakeOf get(fn stake_of): map hasher(blake2_256) T::AccountId => BalanceOf; + /// Votes and locked stake of a particular voter. + pub Voting get(fn voting): map hasher(twox_64_concat) T::AccountId => (BalanceOf, Vec); - /// The present candidate list. Sorted based on account-id. A current member or a runner can - /// never enter this vector and is always implicitly assumed to be a candidate. + /// The present candidate list. Sorted based on account-id. A current member or runner-up + /// can never enter this vector and is always implicitly assumed to be a candidate. 
pub Candidates get(fn candidates): Vec; + } add_extra_genesis { + config(members): Vec<(T::AccountId, BalanceOf)>; + build(|config: &GenesisConfig| { + let members = config.members.iter().map(|(ref member, ref stake)| { + // make sure they have enough stake + assert!( + T::Currency::free_balance(member) >= *stake, + "Genesis member does not have enough stake", + ); + + // reserve candidacy bond and set as members. + T::Currency::reserve(&member, T::CandidacyBond::get()) + .expect("Genesis member does not have enough balance to be a candidate"); + + // Note: all members will only vote for themselves, hence they must be given exactly + // their own stake as total backing. Any sane election should behave as such. + // Nonetheless, stakes will be updated for term 1 onwards according to the election. + Members::::mutate(|members| { + match members.binary_search_by(|(a, _b)| a.cmp(member)) { + Ok(_) => panic!("Duplicate member in elections phragmen genesis: {}", member), + Err(pos) => members.insert(pos, (member.clone(), *stake)), + } + }); + + // set self-votes to make persistent. + >::vote( + T::Origin::from(Some(member.clone()).into()), + vec![member.clone()], + *stake, + ).expect("Genesis member could not vote."); + + member.clone() + }).collect::>(); + + // report genesis members to upstream, if any. + T::InitializeMembers::initialize_members(&members); + }) } } @@ -204,12 +242,34 @@ decl_error! { } } +mod migration { + use super::*; + use frame_support::{migration::{StorageKeyIterator, take_storage_item}, Twox64Concat}; + pub fn migrate() { + for (who, votes) in StorageKeyIterator + ::, Twox64Concat> + ::new(b"PhragmenElection", b"VotesOf") + .drain() + { + if let Some(stake) = take_storage_item::<_, BalanceOf, Twox64Concat>(b"PhragmenElection", b"StakeOf", &who) { + Voting::::insert(who, (stake, votes)); + } + } + } +} + decl_module! { pub struct Module for enum Call where origin: T::Origin { type Error = Error; fn deposit_event() = default; + fn on_runtime_upgrade() -> Weight { + migration::migrate::(); + + SimpleDispatchInfo::default().weigh_data(()) + } + const CandidacyBond: BalanceOf = T::CandidacyBond::get(); const VotingBond: BalanceOf = T::VotingBond::get(); const DesiredMembers: u32 = T::DesiredMembers::get(); @@ -265,8 +325,8 @@ decl_module! { locked_balance, WithdrawReasons::except(WithdrawReason::TransactionPayment), ); - >::insert(&who, locked_balance); - >::insert(&who, votes); + + Voting::::insert(&who, (locked_balance, votes)); } /// Remove `origin` as a voter. This removes the lock and returns the bond. @@ -444,11 +504,13 @@ decl_module! { } /// What to do at the end of each block. Checks if an election needs to happen or not. - fn on_initialize(n: T::BlockNumber) { + fn on_initialize(n: T::BlockNumber) -> Weight { if let Err(e) = Self::end_block(n) { print("Guru meditation"); print(e); } + + SimpleDispatchInfo::default().weigh_data(()) } } } @@ -523,7 +585,7 @@ impl Module { /// /// State: O(1). fn is_voter(who: &T::AccountId) -> bool { - >::contains_key(who) + Voting::::contains_key(who) } /// Check if `who` is currently an active member. @@ -586,8 +648,7 @@ impl Module { /// lock. Optionally, it would also return the reserved voting bond if indicated by `unreserve`. fn do_remove_voter(who: &T::AccountId, unreserve: bool) { // remove storage and lock. - >::remove(who); - >::remove(who); + Voting::::remove(who); T::Currency::remove_lock(MODULE_ID, who); if unreserve { @@ -597,7 +658,12 @@ impl Module { /// The locked stake of a voter. 
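The `migration` module drains every entry of the retired `VotesOf` map and, where a matching `StakeOf` entry exists, folds the pair into the new `Voting` map on the first `on_runtime_upgrade`. A toy model of that drain-and-merge pattern, with plain `HashMap`s in place of the frame storage APIs:

// Toy model of the one-off drain-and-merge migration.
use std::collections::HashMap;

fn migrate(
    votes_of: &mut HashMap<u64, Vec<u64>>,
    stake_of: &mut HashMap<u64, u64>,
    voting: &mut HashMap<u64, (u64, Vec<u64>)>,
) {
    for (who, votes) in votes_of.drain() {
        if let Some(stake) = stake_of.remove(&who) {
            voting.insert(who, (stake, votes));
        }
    }
}

fn main() {
    let mut votes_of = HashMap::from([(2u64, vec![5u64]), (3, vec![5])]);
    let mut stake_of = HashMap::from([(2u64, 20u64), (3, 30)]);
    let mut voting = HashMap::new();
    migrate(&mut votes_of, &mut stake_of, &mut voting);
    assert_eq!(voting.get(&2), Some(&(20, vec![5])));
    assert!(votes_of.is_empty() && stake_of.is_empty());
}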
fn locked_stake_of(who: &T::AccountId) -> BalanceOf { - Self::stake_of(who) + Voting::::get(who).0 + } + + /// The locked stake of a voter. + fn votes_of(who: &T::AccountId) -> Vec { + Voting::::get(who).1 } /// Check there's nothing to do this block. @@ -628,7 +694,7 @@ impl Module { let num_to_elect = desired_runners_up + desired_seats; let mut candidates = Self::candidates(); - // candidates who explicitly called `submit_candidacy`. Only these folks are at the risk of + // candidates who explicitly called `submit_candidacy`. Only these folks are at risk of // losing their bond. let exposed_candidates = candidates.clone(); // current members are always a candidate for the next round as well. @@ -637,15 +703,14 @@ impl Module { // previous runners_up are also always candidates for the next round. candidates.append(&mut Self::runners_up_ids()); - let voters_and_votes = >::enumerate() - .map(|(v, i)| (v, i)) - .collect::)>>(); - let maybe_phragmen_result = sp_phragmen::elect::<_, _, _, T::CurrencyToVote, Perbill>( + let voters_and_votes = Voting::::iter() + .map(|(voter, (stake, targets))| { (voter, stake, targets) }) + .collect::>(); + let maybe_phragmen_result = sp_phragmen::elect::<_, _, T::CurrencyToVote, Perbill>( num_to_elect, 0, candidates, - voters_and_votes, - Self::locked_stake_of, + voters_and_votes.clone(), ); if let Some(phragmen_result) = maybe_phragmen_result { @@ -667,12 +732,18 @@ impl Module { .filter_map(|(m, a)| if a.is_zero() { None } else { Some(m) } ) .collect::>(); - let support_map = sp_phragmen::build_support_map::<_, _, _, T::CurrencyToVote, Perbill>( - &new_set, - &phragmen_result.assignments, - Self::locked_stake_of, + let stake_of = |who: &T::AccountId| -> ExtendedBalance { + , u64>>::convert( + Self::locked_stake_of(who) + ) as ExtendedBalance + }; + let staked_assignments = sp_phragmen::assignment_ratio_to_staked( + phragmen_result.assignments, + stake_of, ); + let (support_map, _) = build_support_map::(&new_set, &staked_assignments); + let to_balance = |e: ExtendedBalance| >>::convert(e); let new_set_with_stake = new_set @@ -690,12 +761,24 @@ impl Module { // split new set into winners and runners up. let split_point = desired_seats.min(new_set_with_stake.len()); let mut new_members = (&new_set_with_stake[..split_point]).to_vec(); - let most_popular = new_members.first().map(|x| x.0.clone()); // save the runners up as-is. They are sorted based on desirability. - // sort and save the members. + // save the members, sorted based on account id. new_members.sort_by(|i, j| i.0.cmp(&j.0)); + let mut prime_votes: Vec<_> = new_members.iter().map(|c| (&c.0, BalanceOf::::zero())).collect(); + for (_, stake, targets) in voters_and_votes.into_iter() { + for (votes, who) in targets.iter() + .enumerate() + .map(|(votes, who)| ((MAXIMUM_VOTE - votes) as u32, who)) + { + if let Ok(i) = prime_votes.binary_search_by_key(&who, |k| k.0) { + prime_votes[i].1 += stake * votes.into(); + } + } + } + let prime = prime_votes.into_iter().max_by_key(|x| x.1).map(|x| x.0.clone()); + // new_members_ids is sorted by account id. let new_members_ids = new_members .iter() @@ -723,7 +806,7 @@ impl Module { &outgoing.clone(), &new_members_ids, ); - T::ChangeMembers::set_prime(most_popular); + T::ChangeMembers::set_prime(prime); // outgoing candidates lose their bond. 
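Prime selection no longer just takes the most-backed winner: each voter contributes `stake * (MAXIMUM_VOTE - position)` to every account on its ballot, and the new member with the highest total becomes the prime, so votes for accounts that dropped out simply score nothing. A small sketch of that scoring, assuming `MAXIMUM_VOTE = 16` as the per-ballot vote limit:

// Sketch of the position-weighted prime scoring over the new member set.
const MAXIMUM_VOTE: u64 = 16; // assumed per-ballot vote limit

fn prime(members: &[u64], voters: &[(u64, Vec<u64>)]) -> Option<u64> {
    let mut scores: Vec<(u64, u64)> = members.iter().map(|m| (*m, 0)).collect();
    for (stake, targets) in voters {
        for (pos, who) in targets.iter().enumerate() {
            // Only accounts that made it into the new member set accrue score.
            if let Some(entry) = scores.iter_mut().find(|(m, _)| m == who) {
                entry.1 += stake * (MAXIMUM_VOTE - pos as u64);
            }
        }
    }
    scores.into_iter().max_by_key(|(_, score)| *score).map(|(m, _)| m)
}

fn main() {
    // Voter stakes and ordered ballots, loosely following the test mock;
    // candidate 4 dropped out, so only 3 and 5 are members.
    let voters = vec![(10, vec![4, 3]), (30, vec![3]), (50, vec![5])];
    assert_eq!(prime(&[3, 5], &voters), Some(5));
}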
let mut to_burn_bond = outgoing.to_vec(); @@ -776,6 +859,18 @@ impl Contains for Module { Self::is_member(who) } fn sorted_members() -> Vec { Self::members_ids() } + + // A special function to populate members in this pallet for passing Origin + // checks in runtime benchmarking. + #[cfg(feature = "runtime-benchmarks")] + fn add(who: &T::AccountId) { + Members::::mutate(|members| { + match members.binary_search_by(|(a, _b)| a.cmp(who)) { + Ok(_) => (), + Err(pos) => members.insert(pos, (who.clone(), BalanceOf::::default())), + } + }) + } } #[cfg(test)] @@ -789,7 +884,7 @@ mod tests { Perbill, testing::Header, BuildStorage, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, }; - use crate as elections; + use crate as elections_phragmen; use frame_system as system; parameter_types! { @@ -925,6 +1020,7 @@ mod tests { type Currency = Balances; type CurrencyToVote = CurrencyToVoteHandler; type ChangeMembers = TestChangeMembers; + type InitializeMembers = (); type CandidacyBond = CandidacyBond; type VotingBond = VotingBond; type TermDuration = TermDuration; @@ -946,11 +1042,12 @@ mod tests { { System: system::{Module, Call, Event}, Balances: pallet_balances::{Module, Call, Event, Config}, - Elections: elections::{Module, Call, Event}, + Elections: elections_phragmen::{Module, Call, Event, Config}, } ); pub struct ExtBuilder { + genesis_members: Vec<(u64, u64)>, balance_factor: u64, voter_bond: u64, term_duration: u64, @@ -960,6 +1057,7 @@ mod tests { impl Default for ExtBuilder { fn default() -> Self { Self { + genesis_members: vec![], balance_factor: 1, voter_bond: 2, desired_runners_up: 0, @@ -981,11 +1079,16 @@ mod tests { self.term_duration = duration; self } + pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self { + self.genesis_members = members; + self + } pub fn build(self) -> sp_io::TestExternalities { VOTING_BOND.with(|v| *v.borrow_mut() = self.voter_bond); TERM_DURATION.with(|v| *v.borrow_mut() = self.term_duration); DESIRED_RUNNERS_UP.with(|v| *v.borrow_mut() = self.desired_runners_up); - GenesisConfig { + MEMBERS.with(|m| *m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>()); + let mut ext: sp_io::TestExternalities = GenesisConfig { pallet_balances: Some(pallet_balances::GenesisConfig::{ balances: vec![ (1, 10 * self.balance_factor), @@ -996,12 +1099,17 @@ mod tests { (6, 60 * self.balance_factor) ], }), - }.build_storage().unwrap().into() + elections_phragmen: Some(elections_phragmen::GenesisConfig:: { + members: self.genesis_members + }), + }.build_storage().unwrap().into(); + ext.execute_with(|| System::set_block_number(1)); + ext } } fn all_voters() -> Vec { - >::enumerate().map(|(v, _)| v).collect::>() + Voting::::iter().map(|(v, _)| v).collect::>() } fn balances(who: &u64) -> (u64, u64) { @@ -1017,7 +1125,6 @@ mod tests { #[test] fn params_should_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::desired_members(), 2); assert_eq!(Elections::term_duration(), 5); assert_eq!(Elections::election_rounds(), 0); @@ -1034,6 +1141,60 @@ mod tests { }); } + #[test] + fn genesis_members_should_work() { + ExtBuilder::default().genesis_members(vec![(1, 10), (2, 20)]).build().execute_with(|| { + System::set_block_number(1); + assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); + + assert_eq!(Elections::voting(1), (10, vec![1])); + assert_eq!(Elections::voting(2), (20, vec![2])); + + // they will persist since they have self vote. 
+ System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![1, 2]); + }) + } + + #[test] + fn genesis_members_unsorted_should_work() { + ExtBuilder::default().genesis_members(vec![(2, 20), (1, 10)]).build().execute_with(|| { + System::set_block_number(1); + assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); + + assert_eq!(Elections::voting(1), (10, vec![1])); + assert_eq!(Elections::voting(2), (20, vec![2])); + + // they will persist since they have self vote. + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![1, 2]); + }) + } + + #[test] + #[should_panic = "Genesis member does not have enough stake"] + fn genesis_members_cannot_over_stake_0() { + // 10 cannot lock 20 as their stake and extra genesis will panic. + ExtBuilder::default().genesis_members(vec![(1, 20), (2, 20)]).build(); + } + + #[test] + #[should_panic] + fn genesis_members_cannot_over_stake_1() { + // 10 cannot reserve 20 as voting bond and extra genesis will panic. + ExtBuilder::default().voter_bond(20).genesis_members(vec![(1, 10), (2, 20)]).build(); + } + + #[test] + #[should_panic = "Duplicate member in elections phragmen genesis: 2"] + fn genesis_members_cannot_be_duplicate() { + ExtBuilder::default().genesis_members(vec![(1, 10), (2, 10), (2, 10)]).build(); + } + #[test] fn term_duration_zero_is_passive() { ExtBuilder::default() @@ -1041,7 +1202,6 @@ mod tests { .build() .execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::term_duration(), 0); assert_eq!(Elections::desired_members(), 2); assert_eq!(Elections::election_rounds(), 0); @@ -1220,13 +1380,13 @@ mod tests { assert_eq!(balances(&2), (18, 2)); assert_eq!(has_lock(&2), 20); - assert_eq!(Elections::stake_of(2), 20); + assert_eq!(Elections::locked_stake_of(&2), 20); // can update; different stake; different lock and reserve. assert_ok!(Elections::vote(Origin::signed(2), vec![5, 4], 15)); assert_eq!(balances(&2), (18, 2)); assert_eq!(has_lock(&2), 15); - assert_eq!(Elections::stake_of(2), 15); + assert_eq!(Elections::locked_stake_of(&2), 15); }); } @@ -1282,6 +1442,31 @@ mod tests { }); } + #[test] + fn prime_votes_for_exiting_members_are_removed() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + + assert_ok!(Elections::vote(Origin::signed(1), vec![4, 3], 10)); + assert_ok!(Elections::vote(Origin::signed(2), vec![4], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(4))); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![3, 5]); + assert_eq!(Elections::candidates(), vec![]); + + assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); + }); + } + #[test] fn cannot_vote_for_more_than_candidates() { ExtBuilder::default().build().execute_with(|| { @@ -1316,7 +1501,7 @@ mod tests { assert_ok!(Elections::vote(Origin::signed(2), vec![4, 5], 30)); // you can lie but won't get away with it. 
- assert_eq!(Elections::stake_of(2), 20); + assert_eq!(Elections::locked_stake_of(&2), 20); assert_eq!(has_lock(&2), 20); }); } @@ -1330,16 +1515,16 @@ mod tests { assert_ok!(Elections::vote(Origin::signed(3), vec![5], 30)); assert_eq_uvec!(all_voters(), vec![2, 3]); - assert_eq!(Elections::stake_of(2), 20); - assert_eq!(Elections::stake_of(3), 30); - assert_eq!(Elections::votes_of(2), vec![5]); - assert_eq!(Elections::votes_of(3), vec![5]); + assert_eq!(Elections::locked_stake_of(&2), 20); + assert_eq!(Elections::locked_stake_of(&3), 30); + assert_eq!(Elections::votes_of(&2), vec![5]); + assert_eq!(Elections::votes_of(&3), vec![5]); assert_ok!(Elections::remove_voter(Origin::signed(2))); assert_eq_uvec!(all_voters(), vec![3]); - assert_eq!(Elections::votes_of(2), vec![]); - assert_eq!(Elections::stake_of(2), 0); + assert_eq!(Elections::votes_of(&2), vec![]); + assert_eq!(Elections::locked_stake_of(&2), 0); assert_eq!(balances(&2), (20, 0)); assert_eq!(Balances::locks(&2).len(), 0); @@ -1457,10 +1642,9 @@ mod tests { assert_eq!(balances(&5), (45, 5)); assert_ok!(Elections::report_defunct_voter(Origin::signed(5), 3)); - assert_eq!( - System::events()[7].event, - Event::elections(RawEvent::VoterReported(3, 5, true)) - ); + assert!(System::events().iter().any(|event| { + event.event == Event::elections_phragmen(RawEvent::VoterReported(3, 5, true)) + })); assert_eq!(balances(&3), (28, 0)); assert_eq!(balances(&5), (47, 5)); @@ -1486,10 +1670,9 @@ mod tests { assert_eq!(balances(&5), (45, 5)); assert_ok!(Elections::report_defunct_voter(Origin::signed(5), 4)); - assert_eq!( - System::events()[7].event, - Event::elections(RawEvent::VoterReported(4, 5, false)) - ); + assert!(System::events().iter().any(|event| { + event.event == Event::elections_phragmen(RawEvent::VoterReported(4, 5, false)) + })); assert_eq!(balances(&4), (35, 5)); assert_eq!(balances(&5), (45, 3)); @@ -1510,9 +1693,9 @@ mod tests { assert_eq_uvec!(all_voters(), vec![2, 3, 4]); - assert_eq!(Elections::votes_of(2), vec![5]); - assert_eq!(Elections::votes_of(3), vec![3]); - assert_eq!(Elections::votes_of(4), vec![4]); + assert_eq!(Elections::votes_of(&2), vec![5]); + assert_eq!(Elections::votes_of(&3), vec![3]); + assert_eq!(Elections::votes_of(&4), vec![4]); assert_eq!(Elections::candidates(), vec![3, 4, 5]); assert_eq!(>::decode_len().unwrap(), 3); @@ -1897,10 +2080,9 @@ mod tests { // 5 is an outgoing loser. will also get slashed. 
assert_eq!(balances(&5), (45, 2)); - assert_eq!( - System::events()[6].event, - Event::elections(RawEvent::NewTerm(vec![(4, 40), (5, 50)])), - ); + assert!(System::events().iter().any(|event| { + event.event == Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)])) + })); }) } diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index e65f4e5d4627827380cbb06cc20732e0de40a5dc..6043ac4681e212afb462540552af02639c8af4bd 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-elections" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,17 +10,17 @@ description = "FRAME pallet for elections" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } [features] default = ["std"] @@ -34,3 +34,6 @@ std = [ "sp-runtime/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index d93d18fe23f64c049bc1337acb3f538cbf208ec7..a8ea0b8c2e4555d8990ba485e749f3c49b11024c 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -30,7 +30,7 @@ use sp_runtime::{ }; use frame_support::{ decl_storage, decl_event, ensure, decl_module, decl_error, - weights::SimpleDispatchInfo, + weights::{Weight, SimpleDispatchInfo, WeighData}, traits::{ Currency, ExistenceRequirement, Get, LockableCurrency, LockIdentifier, BalanceStatus, OnUnbalanced, ReservableCurrency, WithdrawReason, WithdrawReasons, ChangeMembers @@ -236,16 +236,16 @@ decl_storage! { // [`all_approvals_of`]. Furthermore, each vector of scalars is chunked with the cap of // `APPROVAL_SET_SIZE`. 
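The storage items below move from the opaque `blake2_256` hasher to the transparent `twox_64_concat` and `blake2_128_concat` hashers, which append the SCALE-encoded key to a fixed-size hash prefix; that layout is what makes key iteration and recovery (as used by `iter()` and the phragmen migration above) possible. A toy illustration of the key layout, assuming an 8-byte prefix for `twox_64_concat` and 16 bytes for `blake2_128_concat`:

// Toy illustration of a "concat" storage key: hash(encoded_key) ++ encoded_key,
// so the original key can be recovered by skipping the fixed-size prefix.
fn concat_key(hash_prefix: &[u8], encoded_key: &[u8]) -> Vec<u8> {
    let mut out = hash_prefix.to_vec();
    out.extend_from_slice(encoded_key);
    out
}

fn recover_key(storage_key: &[u8], prefix_len: usize) -> &[u8] {
    &storage_key[prefix_len..]
}

fn main() {
    let encoded = 42u64.to_le_bytes();
    // 8-byte prefix, as twox_64_concat is assumed to use.
    let key = concat_key(&[0xAB; 8], &encoded);
    assert_eq!(recover_key(&key, 8), &encoded[..]);
}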
pub ApprovalsOf get(fn approvals_of): - map hasher(blake2_256) (T::AccountId, SetIndex) => Vec; + map hasher(twox_64_concat) (T::AccountId, SetIndex) => Vec; /// The vote index and list slot that the candidate `who` was registered or `None` if they /// are not currently registered. pub RegisterInfoOf get(fn candidate_reg_info): - map hasher(blake2_256) T::AccountId => Option<(VoteIndex, u32)>; + map hasher(twox_64_concat) T::AccountId => Option<(VoteIndex, u32)>; /// Basic information about a voter. pub VoterInfoOf get(fn voter_info): - map hasher(blake2_256) T::AccountId => Option>>; + map hasher(twox_64_concat) T::AccountId => Option>>; /// The present voter list (chunked and capped at [`VOTER_SET_SIZE`]). - pub Voters get(fn voters): map hasher(blake2_256) SetIndex => Vec>; + pub Voters get(fn voters): map hasher(twox_64_concat) SetIndex => Vec>; /// the next free set to store a voter in. This will keep growing. pub NextVoterSet get(fn next_nonfull_voter_set): SetIndex = 0; /// Current number of Voters. @@ -266,7 +266,7 @@ decl_storage! { /// Who is able to vote for whom. Value is the fund-holding account, key is the /// vote-transaction-sending account. - pub Proxy get(fn proxy): map hasher(blake2_256) T::AccountId => Option; + pub Proxy get(fn proxy): map hasher(blake2_128_concat) T::AccountId => Option; } } @@ -698,11 +698,12 @@ decl_module! { >::put(count); } - fn on_initialize(n: T::BlockNumber) { + fn on_initialize(n: T::BlockNumber) -> Weight { if let Err(e) = Self::end_block(n) { print("Guru meditation"); print(e); } + SimpleDispatchInfo::default().weigh_data(()) } } } diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index b82e73d512aa6eb254a1eda22ff801b7ad768a95..2898be26ca3001200d1806297b4695a655e212ea 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -208,7 +208,7 @@ impl ExtBuilder { VOTING_FEE.with(|v| *v.borrow_mut() = self.voting_fee); PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow_mut() = self.bad_presentation_punishment); DECAY_RATIO.with(|v| *v.borrow_mut() = self.decay_ratio); - GenesisConfig { + let mut ext: sp_io::TestExternalities = GenesisConfig { pallet_balances: Some(pallet_balances::GenesisConfig::{ balances: vec![ (1, 10 * self.balance_factor), @@ -225,7 +225,9 @@ impl ExtBuilder { presentation_duration: 2, term_duration: 5, }), - }.build_storage().unwrap().into() + }.build_storage().unwrap().into(); + ext.execute_with(|| System::set_block_number(1)); + ext } } diff --git a/frame/elections/src/tests.rs b/frame/elections/src/tests.rs index e26f0312903fcb61c7b94fe2d93e7ac0da77e86b..64b01f12e0cfe8c3b9971bee6e3772f4a2fb2adf 100644 --- a/frame/elections/src/tests.rs +++ b/frame/elections/src/tests.rs @@ -26,7 +26,6 @@ use frame_support::{assert_ok, assert_err, assert_noop}; #[test] fn params_should_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::next_vote_from(1), 4); assert_eq!(Elections::next_vote_from(4), 4); assert_eq!(Elections::next_vote_from(5), 8); @@ -408,8 +407,6 @@ fn voting_locking_stake_and_reserving_bond_works() { #[test] fn voting_without_any_candidate_count_should_not_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); - assert_eq!(Elections::candidates().len(), 0); assert_noop!( @@ -422,8 +419,6 @@ fn voting_without_any_candidate_count_should_not_work() { #[test] fn voting_setting_an_approval_vote_count_more_than_candidate_count_should_not_work() { ExtBuilder::default().build().execute_with(|| { - 
System::set_block_number(1); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); assert_eq!(Elections::candidates().len(), 1); @@ -437,8 +432,6 @@ fn voting_setting_an_approval_vote_count_more_than_candidate_count_should_not_wo #[test] fn voting_resubmitting_approvals_should_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); assert_ok!(Elections::set_approvals(Origin::signed(4), vec![true], 0, 0, 40)); @@ -456,8 +449,6 @@ fn voting_resubmitting_approvals_should_work() { #[test] fn voting_retracting_voter_should_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); assert_eq!(Elections::candidates().len(), 1); @@ -501,7 +492,6 @@ fn voting_retracting_voter_should_work() { #[test] fn voting_invalid_retraction_index_should_not_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); @@ -514,7 +504,6 @@ fn voting_invalid_retraction_index_should_not_work() { #[test] fn voting_overflow_retraction_index_should_not_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); @@ -525,7 +514,6 @@ fn voting_overflow_retraction_index_should_not_work() { #[test] fn voting_non_voter_retraction_should_not_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); @@ -740,7 +728,6 @@ fn retracting_inactive_voter_by_nonvoter_should_not_work() { #[test] fn candidacy_simple_candidate_submission_should_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::candidates(), Vec::::new()); assert_eq!(Elections::candidate_reg_info(1), None); assert_eq!(Elections::candidate_reg_info(2), None); @@ -768,7 +755,6 @@ fn candidacy_submission_using_free_slot_should_work() { let mut t = new_test_ext_with_candidate_holes(); t.execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::candidates(), vec![0, 0, 1]); assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); @@ -784,7 +770,6 @@ fn candidacy_submission_using_alternative_free_slot_should_work() { let mut t = new_test_ext_with_candidate_holes(); t.execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::candidates(), vec![0, 0, 1]); assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); @@ -800,7 +785,6 @@ fn candidacy_submission_not_using_free_slot_should_not_work() { let mut t = new_test_ext_with_candidate_holes(); t.execute_with(|| { - System::set_block_number(1); assert_noop!( Elections::submit_candidacy(Origin::signed(4), 3), Error::::InvalidCandidateSlot @@ -811,7 +795,6 @@ fn candidacy_submission_not_using_free_slot_should_not_work() { #[test] fn candidacy_bad_candidate_slot_submission_should_not_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::candidates(), Vec::::new()); assert_noop!( Elections::submit_candidacy(Origin::signed(1), 1), @@ -823,7 +806,6 @@ fn 
candidacy_bad_candidate_slot_submission_should_not_work() { #[test] fn candidacy_non_free_candidate_slot_submission_should_not_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::candidates(), Vec::::new()); assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); assert_eq!(Elections::candidates(), vec![1]); @@ -837,7 +819,6 @@ fn candidacy_non_free_candidate_slot_submission_should_not_work() { #[test] fn candidacy_dupe_candidate_submission_should_not_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::candidates(), Vec::::new()); assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); assert_eq!(Elections::candidates(), vec![1]); @@ -851,7 +832,6 @@ fn candidacy_dupe_candidate_submission_should_not_work() { #[test] fn candidacy_poor_candidate_submission_should_not_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); assert_eq!(Elections::candidates(), Vec::::new()); assert_noop!( Elections::submit_candidacy(Origin::signed(7), 0), @@ -863,8 +843,6 @@ fn candidacy_poor_candidate_submission_should_not_work() { #[test] fn election_voting_should_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); @@ -892,8 +870,6 @@ fn election_voting_should_work() { #[test] fn election_proxy_voting_should_work() { ExtBuilder::default().build().execute_with(|| { - System::set_block_number(1); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); >::insert(11, 1); diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml index 76407d57e42846394039d3f9af9f5dfb70f2016d..95b3c24d8796e683c30c60d6a2cb3e3a5ffd4990 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-evm" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,18 +10,18 @@ description = "FRAME EVM contracts pallet" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } -pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../balances" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -primitive-types = { version = "0.6.2", default-features = false, features = ["rlp"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-alpha.5", default-features = false, 
path = "../timestamp" } +pallet-balances = { version = "2.0.0-alpha.5", default-features = false, path = "../balances" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +primitive-types = { version = "0.7.0", default-features = false, features = ["rlp"] } rlp = { version = "0.4", default-features = false } -evm = { version = "0.15", default-features = false } +evm = { version = "0.16", default-features = false } sha3 = { version = "0.8", default-features = false } [features] @@ -42,3 +42,6 @@ std = [ "evm/std", "pallet-timestamp/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/evm/src/backend.rs b/frame/evm/src/backend.rs index d72c8b785e3468762054c9bd028fe3e3a1b730db..c610f24bb1db2d763979e978dd183c5719a731b9 100644 --- a/frame/evm/src/backend.rs +++ b/frame/evm/src/backend.rs @@ -7,7 +7,6 @@ use sp_core::{U256, H256, H160}; use sp_runtime::traits::UniqueSaturatedInto; use frame_support::storage::{StorageMap, StorageDoubleMap}; use sha3::{Keccak256, Digest}; -use evm::Config; use evm::backend::{Backend as BackendT, ApplyBackend, Apply}; use crate::{Trait, Accounts, AccountStorages, AccountCodes, Module, Event}; @@ -43,10 +42,6 @@ pub struct Vicinity { pub origin: H160, } -/// Gasometer config used for executor. Currently this is hard-coded to -/// Istanbul hard fork. -pub static GASOMETER_CONFIG: Config = Config::istanbul(); - /// Substrate backend for EVM. pub struct Backend<'vicinity, T> { vicinity: &'vicinity Vicinity, @@ -177,7 +172,7 @@ impl<'vicinity, T: Trait> ApplyBackend for Backend<'vicinity, T> { } for log in logs { - Module::::deposit_event(Event::Log(Log { + Module::::deposit_event(Event::::Log(Log { address: log.address, topics: log.topics, data: log.data, diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index df0ddaafbbe164cb1ddaee8d647961f90b996c7e..a50c545a461c1937d9d58eb06a86ff372558ca66 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -35,7 +35,7 @@ use sp_runtime::{ DispatchResult, traits::{UniqueSaturatedInto, AccountIdConversion, SaturatedConversion}, }; use sha3::{Digest, Keccak256}; -use evm::{ExitReason, ExitSucceed, ExitError}; +use evm::{ExitReason, ExitSucceed, ExitError, Config}; use evm::executor::StackExecutor; use evm::backend::ApplyBackend; @@ -116,6 +116,8 @@ impl Precompiles for () { } } +static ISTANBUL_CONFIG: Config = Config::istanbul(); + /// EVM module trait pub trait Trait: frame_system::Trait + pallet_timestamp::Trait { /// Calculator for current gas price. @@ -125,26 +127,37 @@ pub trait Trait: frame_system::Trait + pallet_timestamp::Trait { /// Currency type for deposit and withdraw. type Currency: Currency; /// The overarching event type. - type Event: From + Into<::Event>; + type Event: From> + Into<::Event>; /// Precompiles associated with this EVM engine. type Precompiles: Precompiles; + + /// EVM config used in the module. + fn config() -> &'static Config { + &ISTANBUL_CONFIG + } } decl_storage! 
{
 	trait Store for Module<T: Trait> as EVM {
-		Accounts get(fn accounts) config(): map hasher(blake2_256) H160 => Account;
-		AccountCodes: map hasher(blake2_256) H160 => Vec<u8>;
-		AccountStorages: double_map hasher(blake2_256) H160, hasher(blake2_256) H256 => H256;
+		Accounts get(fn accounts) config(): map hasher(blake2_128_concat) H160 => Account;
+		AccountCodes: map hasher(blake2_128_concat) H160 => Vec<u8>;
+		AccountStorages: double_map hasher(blake2_128_concat) H160, hasher(blake2_128_concat) H256 => H256;
 	}
 }

 decl_event! {
 	/// EVM events
-	pub enum Event {
+	pub enum Event<T> where
+		<T as frame_system::Trait>::AccountId,
+	{
 		/// Ethereum events from contracts.
 		Log(Log),
 		/// A contract has been created at given address.
 		Created(H160),
+		/// A deposit has been made at a given address.
+		BalanceDeposit(AccountId, H160, U256),
+		/// A withdrawal has been made from a given address.
+		BalanceWithdraw(AccountId, H160, U256),
 	}
 }
@@ -195,6 +208,7 @@ decl_module! {
 			Accounts::mutate(&address, |account| {
 				account.balance += bvalue;
 			});
+			Module::<T>::deposit_event(Event::<T>::BalanceDeposit(sender, address, bvalue));
 		}

 		/// Withdraw balance from EVM into currency/balances module.
@@ -218,6 +232,7 @@ decl_module! {
 			Accounts::insert(&address, account);

 			T::Currency::resolve_creating(&sender, imbalance);
+			Module::<T>::deposit_event(Event::<T>::BalanceWithdraw(sender, address, bvalue));
 		}

 		/// Issue an EVM call operation. This is similar to a message call transaction in Ethereum.
@@ -282,7 +297,7 @@ decl_module! {
 				},
 			)?;

-			Module::<T>::deposit_event(Event::Created(create_address));
+			Module::<T>::deposit_event(Event::<T>::Created(create_address));
 			Ok(())
 		}

@@ -320,7 +335,7 @@ decl_module! {
 				},
 			)?;

-			Module::<T>::deposit_event(Event::Created(create_address));
+			Module::<T>::deposit_event(Event::<T>::Created(create_address));
 			Ok(())
 		}
 	}
@@ -381,7 +396,7 @@ impl<T: Trait> Module<T> {
 		let mut executor = StackExecutor::new_with_precompile(
 			&backend,
 			gas_limit as usize,
-			&backend::GASOMETER_CONFIG,
+			T::config(),
 			T::Precompiles::execute,
 		);
diff --git a/frame/example-offchain-worker/Cargo.toml b/frame/example-offchain-worker/Cargo.toml
index ff8d780c3cff326c3b1fb749270dc8ec09c7043a..6b3452d01894f30c46fcfefb8c81f7e0125ae9df 100644
--- a/frame/example-offchain-worker/Cargo.toml
+++ b/frame/example-offchain-worker/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-example-offchain-worker"
-version = "2.0.0-alpha.3"
+version = "2.0.0-alpha.5"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "Unlicense"
@@ -9,15 +9,15 @@ repository = "https://github.com/paritytech/substrate/"
 description = "FRAME example pallet for offchain worker"

 [dependencies]
-codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false }
-frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" }
+codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false }
+frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" }
+frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" }
 serde = { version = "1.0.101", optional = true }
-sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" }
-sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" }
-sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" }
-sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" }
-serde_json = { version = "1.0.46", default-features = false, features = ["alloc"] }
+sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" }
+sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" }
+sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" }
+sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" }
+lite-json = { version = "0.1", default-features = false }

 [features]
 default = ["std"]
@@ -26,8 +26,12 @@ std = [
 	"frame-support/std",
 	"frame-system/std",
 	"serde",
+	"lite-json/std",
 	"sp-core/std",
 	"sp-io/std",
 	"sp-runtime/std",
 	"sp-std/std",
 ]
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs
index d4905d26b00d0866de2bd2544612b5bc458c6965..ac9ac2d1eea4f73a89be9d32dbddfda6c58d3ff4 100644
--- a/frame/example-offchain-worker/src/lib.rs
+++ b/frame/example-offchain-worker/src/lib.rs
@@ -47,13 +47,17 @@ use frame_support::{
 	weights::SimpleDispatchInfo,
 };
 use frame_system::{self as system, ensure_signed, ensure_none, offchain};
-use serde_json as json;
 use sp_core::crypto::KeyTypeId;
 use sp_runtime::{
 	offchain::{http, Duration, storage::StorageValueRef},
 	traits::Zero,
-	transaction_validity::{InvalidTransaction, ValidTransaction, TransactionValidity},
+	transaction_validity::{
+		InvalidTransaction, ValidTransaction, TransactionValidity, TransactionSource,
+		TransactionPriority,
+	},
 };
+use sp_std::vec::Vec;
+use lite_json::json::JsonValue;

 #[cfg(test)]
 mod tests;
@@ -103,6 +107,12 @@ pub trait Trait: frame_system::Trait {
 	///
 	/// This ensures that we only accept unsigned transactions once, every `UnsignedInterval` blocks.
 	type UnsignedInterval: Get<Self::BlockNumber>;
+
+	/// A configuration for base priority of unsigned transactions.
+	///
+	/// This is exposed so that it can be tuned for particular runtime, when
+	/// multiple pallets send unsigned transactions.
+	type UnsignedPriority: Get<TransactionPriority>;
 }

 decl_storage! {
@@ -276,7 +286,7 @@ impl<T: Trait> Module<T> {
 		match last_send {
 			// If we already have a value in storage and the block number is recent enough
 			// we avoid sending another transaction at this time.
-			Some(Some(block)) if block + T::GracePeriod::get() < block_number => {
+			Some(Some(block)) if block_number < block + T::GracePeriod::get() => {
 				Err(RECENTLY_SENT)
 			},
 			// In every other case we attempt to acquire the lock and send a transaction.
@@ -320,7 +330,7 @@ impl<T: Trait> Module<T> {
 	}

 	/// A helper function to fetch the price and send signed transaction.
-	fn fetch_price_and_send_signed() -> Result<(), String> {
+	fn fetch_price_and_send_signed() -> Result<(), &'static str> {
 		use system::offchain::SubmitSignedTransaction;
 		// Firstly we check if there are any accounts in the local keystore that are capable of
 		// signing the transaction.
@@ -334,7 +344,7 @@ impl<T: Trait> Module<T> {
 		// Make an external HTTP request to fetch the current price.
 		// Note this call will block until response is received.
-		let price = Self::fetch_price().map_err(|e| format!("{:?}", e))?;
+		let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?;

 		// Received price is wrapped into a call to `submit_price` public function of this pallet.
 		// This means that the transaction, when executed, will simply call that function passing
@@ -357,20 +367,18 @@ impl<T: Trait> Module<T> {
 	}

 	/// A helper function to fetch the price and send unsigned transaction.
-	fn fetch_price_and_send_unsigned(block_number: T::BlockNumber) -> Result<(), String> {
+	fn fetch_price_and_send_unsigned(block_number: T::BlockNumber) -> Result<(), &'static str> {
 		use system::offchain::SubmitUnsignedTransaction;
 		// Make sure we don't fetch the price if unsigned transaction is going to be rejected
 		// anyway.
 		let next_unsigned_at = <NextUnsignedAt<T>>::get();
 		if next_unsigned_at > block_number {
-			return Err(
-				format!("Too early to send unsigned transaction. Next at: {:?}", next_unsigned_at)
-			)?
+			return Err("Too early to send unsigned transaction")
 		}

 		// Make an external HTTP request to fetch the current price.
 		// Note this call will block until response is received.
-		let price = Self::fetch_price().map_err(|e| format!("{:?}", e))?;
+		let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?;

 		// Received price is wrapped into a call to `submit_price_unsigned` public function of this
 		// pallet. This means that the transaction, when executed, will simply call that function
@@ -428,21 +436,17 @@ impl<T: Trait> Module<T> {
 		// Note that the return object allows you to read the body in chunks as well
 		// with a way to control the deadline.
 		let body = response.body().collect::<Vec<u8>>();
-		// Next we parse the response using `serde_json`. Even though it's possible to use
-		// `serde_derive` and deserialize to a struct it's not recommended due to blob size
-		// overhead introduced by such code. Deserializing to `json::Value` is much more
-		// lightweight and should be preferred, especially if we only care about a small number
-		// of properties from the response.
-		let val: Result<json::Value, _> = json::from_slice(&body);
-		// Let's parse the price as float value. Note that you should avoid using floats in the
-		// runtime, it's fine to do that in the offchain worker, but we do convert it to an integer
-		// before submitting on-chain.
-		let price = val.ok().and_then(|v| v.get("USD").and_then(|v| v.as_f64()));
-		let price = match price {
-			Some(pricef) => Ok((pricef * 100.) as u32),
+
+		// Create a str slice from the body.
+		let body_str = sp_std::str::from_utf8(&body).map_err(|_| {
+			debug::warn!("No UTF8 body");
+			http::Error::Unknown
+		})?;
+
+		let price = match Self::parse_price(body_str) {
+			Some(price) => Ok(price),
 			None => {
-				let s = core::str::from_utf8(&body);
-				debug::warn!("Unable to extract price from the response: {:?}", s);
+				debug::warn!("Unable to extract price from the response: {:?}", body_str);
 				Err(http::Error::Unknown)
 			}
 		}?;
@@ -452,6 +456,28 @@ impl<T: Trait> Module<T> {

 		Ok(price)
 	}

+	/// Parse the price from the given JSON string using `lite-json`.
+	///
+	/// Returns `None` when parsing failed or `Some(price in cents)` when parsing is successful.
+	fn parse_price(price_str: &str) -> Option<u32> {
+		let val = lite_json::parse_json(price_str);
+		let price = val.ok().and_then(|v| match v {
+			JsonValue::Object(obj) => {
+				let mut chars = "USD".chars();
+				obj.into_iter()
+					.find(|(k, _)| k.iter().all(|k| Some(*k) == chars.next()))
+					.and_then(|v| match v.1 {
+						JsonValue::Number(number) => Some(number),
+						_ => None,
+					})
+			},
+			_ => None
+		})?;
+
+		let exp = price.fraction_length.checked_sub(2).unwrap_or(0);
+		Some(price.integer as u32 * 100 + (price.fraction / 10_u64.pow(exp)) as u32)
+	}
+
 	/// Add new price to the list.
fn add_price(who: T::AccountId, price: u32) { debug::info!("Adding to the average: {}", price); @@ -492,7 +518,10 @@ impl frame_support::unsigned::ValidateUnsigned for Module { /// By default unsigned transactions are disallowed, but implementing the validator /// here we make sure that some particular calls (the ones produced by offchain worker) /// are being whitelisted and marked as valid. - fn validate_unsigned(call: &Self::Call) -> TransactionValidity { + fn validate_unsigned( + _source: TransactionSource, + call: &Self::Call, + ) -> TransactionValidity { // Firstly let's check that we call the right function. if let Call::submit_price_unsigned(block_number, new_price) = call { // Now let's check if the transaction has any chance to succeed. @@ -515,32 +544,33 @@ impl frame_support::unsigned::ValidateUnsigned for Module { .map(|price| if &price > new_price { price - new_price } else { new_price - price }) .unwrap_or(0); - Ok(ValidTransaction { + ValidTransaction::with_tag_prefix("ExampleOffchainWorker") // We set base priority to 2**20 to make sure it's included before any other // transactions in the pool. Next we tweak the priority depending on how much // it differs from the current average. (the more it differs the more priority it // has). - priority: (1 << 20) + avg_price as u64, + .priority(T::UnsignedPriority::get().saturating_add(avg_price as _)) // This transaction does not require anything else to go before into the pool. // In theory we could require `previous_unsigned_at` transaction to go first, // but it's not necessary in our case. - requires: vec![], + //.and_requires() + // We set the `provides` tag to be the same as `next_unsigned_at`. This makes // sure only one transaction produced after `next_unsigned_at` will ever // get to the transaction pool and will end up in the block. // We can still have multiple transactions compete for the same "spot", // and the one with higher priority will replace other one in the pool. - provides: vec![codec::Encode::encode(&(KEY_TYPE.0, next_unsigned_at))], + .and_provides(next_unsigned_at) // The transaction is only valid for next 5 blocks. After that it's // going to be revalidated by the pool. - longevity: 5, + .longevity(5) // It's fine to propagate that transaction to other peers, which means it can be // created even by nodes that don't produce blocks. // Note that sometimes it's better to keep it for yourself (if you are the block // producer), since for instance in some schemes others may copy your solution and // claim a reward. - propagate: true, - }) + .propagate(true) + .build() } else { InvalidTransaction::Call.into() } diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs index 9b6a567a178403d9cb1b717705fa520820358bef..727c4942f688254365a301022be9abd711efd42d 100644 --- a/frame/example-offchain-worker/src/tests.rs +++ b/frame/example-offchain-worker/src/tests.rs @@ -94,6 +94,7 @@ impl frame_system::offchain::CreateTransaction for Test { parameter_types! 
{ pub const GracePeriod: u64 = 5; pub const UnsignedInterval: u64 = 128; + pub const UnsignedPriority: u64 = 1 << 20; } impl Trait for Test { @@ -103,6 +104,7 @@ impl Trait for Test { type SubmitUnsignedTransaction = SubmitTransaction; type GracePeriod = GracePeriod; type UnsignedInterval = UnsignedInterval; + type UnsignedPriority = UnsignedPriority; } type Example = Module; @@ -132,7 +134,7 @@ fn should_make_http_call_and_parse_result() { // when let price = Example::fetch_price().unwrap(); // then - assert_eq!(price, 15522); + assert_eq!(price, 15523); }); } @@ -164,7 +166,7 @@ fn should_submit_signed_transaction_on_chain() { assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, Call::submit_price(15522)); + assert_eq!(tx.call, Call::submit_price(15523)); }); } @@ -186,7 +188,7 @@ fn should_submit_unsigned_transaction_on_chain() { assert!(pool_state.read().transactions.is_empty()); let tx = Extrinsic::decode(&mut &*tx).unwrap(); assert_eq!(tx.signature, None); - assert_eq!(tx.call, Call::submit_price_unsigned(1, 15522)); + assert_eq!(tx.call, Call::submit_price_unsigned(1, 15523)); }); } @@ -208,3 +210,19 @@ fn price_oracle_response(state: &mut testing::OffchainState) { ..Default::default() }); } + +#[test] +fn parse_price_works() { + let test_data = vec![ + ("{\"USD\":6536.92}", Some(653692)), + ("{\"USD\":65.92}", Some(6592)), + ("{\"USD\":6536.924565}", Some(653692)), + ("{\"USD\":6536}", Some(653600)), + ("{\"USD2\":6536}", None), + ("{\"USD\":\"6432\"}", None), + ]; + + for (json, expected) in test_data { + assert_eq!(expected, Example::parse_price(json)); + } +} diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 515d1fd8d7c7a10b805b43cba89793f7f1fb67af..4053cc0b1a51a47d17a23566afc36c1798b34988 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-example" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "Unlicense" @@ -10,17 +10,18 @@ description = "FRAME example pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../benchmarking" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../balances" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0-alpha.5", default-features = false, path = "../balances" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = 
"../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } + +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core", default-features = false } [features] default = ["std"] @@ -35,3 +36,7 @@ std = [ "sp-io/std", "sp-std/std" ] +runtime-benchmarks = ["frame-benchmarking"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index e0d89fc7f39d9d5b9db303984a5dafb13db0cdc6..13985671c2e796f61a1c077fab94f3b12094b53e 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -256,14 +256,17 @@ use sp_std::marker::PhantomData; use frame_support::{ dispatch::DispatchResult, decl_module, decl_storage, decl_event, - weights::{SimpleDispatchInfo, DispatchInfo, DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee}, + weights::{ + SimpleDispatchInfo, DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee, + }, }; use sp_std::prelude::*; -use frame_benchmarking::{benchmarks, account}; -use frame_system::{self as system, ensure_signed, ensure_root, RawOrigin}; +use frame_system::{self as system, ensure_signed, ensure_root}; use codec::{Encode, Decode}; use sp_runtime::{ - traits::{SignedExtension, Bounded, SaturatedConversion}, + traits::{ + SignedExtension, Bounded, SaturatedConversion, DispatchInfoOf, + }, transaction_validity::{ ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, }, @@ -345,7 +348,7 @@ decl_storage! { // - `Foo::put(1); Foo::get()` returns `1`; // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). // e.g. Foo: u32; - // e.g. pub Bar get(fn bar): map hasher(blake2_256) T::AccountId => Vec<(T::Balance, u64)>; + // e.g. pub Bar get(fn bar): map hasher(blake2_128_concat) T::AccountId => Vec<(T::Balance, u64)>; // // For basic value items, you'll get a type which implements // `frame_support::StorageValue`. For map items, you'll get a type which @@ -357,7 +360,7 @@ decl_storage! { Dummy get(fn dummy) config(): Option; // A map that has enumerable entries. - Bar get(fn bar) config(): linked_map hasher(blake2_256) T::AccountId => T::Balance; + Bar get(fn bar) config(): map hasher(blake2_128_concat) T::AccountId => T::Balance; // this one uses the default, we'll demonstrate the usage of 'mutate' API. Foo get(fn foo) config(): T::Balance; @@ -516,14 +519,14 @@ decl_module! { // This function could also very well have a weight annotation, similar to any other. The // only difference being that if it is not annotated, the default is // `SimpleDispatchInfo::zero()`, which resolves into no weight. - #[weight = SimpleDispatchInfo::FixedNormal(1000)] - fn on_initialize(_n: T::BlockNumber) { + fn on_initialize(_n: T::BlockNumber) -> Weight { // Anything that needs to be done at the start of the block. // We don't do anything here. + + SimpleDispatchInfo::default().weigh_data(()) } // The signature could also look like: `fn on_finalize()` - #[weight = SimpleDispatchInfo::FixedNormal(2000)] fn on_finalize(_n: T::BlockNumber) { // Anything that needs to be done at the end of the block. // We just kill our dummy storage item. @@ -617,7 +620,6 @@ impl SignedExtension for WatchDummy { // other pallets. 
type Call = Call; type AdditionalSigned = (); - type DispatchInfo = DispatchInfo; type Pre = (); fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } @@ -626,7 +628,7 @@ impl SignedExtension for WatchDummy { &self, _who: &Self::AccountId, call: &Self::Call, - _info: Self::DispatchInfo, + _info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { // if the transaction is too big, just drop it. @@ -648,39 +650,61 @@ impl SignedExtension for WatchDummy { } } -benchmarks!{ - _ { - // Define a common range for `b`. - let b in 1 .. 1000 => (); +#[cfg(feature = "runtime-benchmarks")] +mod benchmarking { + use super::*; + use frame_benchmarking::{benchmarks, account}; + use frame_system::RawOrigin; + + benchmarks!{ + _ { + // Define a common range for `b`. + let b in 1 .. 1000 => (); + } + + // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. + accumulate_dummy { + let b in ...; + let caller = account("caller", 0, 0); + }: _ (RawOrigin::Signed(caller), b.into()) + + // This will measure the execution time of `set_dummy` for b in [1..1000] range. + set_dummy { + let b in ...; + }: set_dummy (RawOrigin::Root, b.into()) + + // This will measure the execution time of `set_dummy` for b in [1..10] range. + another_set_dummy { + let b in 1 .. 10; + }: set_dummy (RawOrigin::Root, b.into()) + + // This will measure the execution time of sorting a vector. + sort_vector { + let x in 0 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { + m.sort(); + } } - // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. - accumulate_dummy { - let b in ...; - let caller = account("caller", 0, 0); - }: _ (RawOrigin::Signed(caller), b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..1000] range. - set_dummy { - let b in ...; - let caller = account("caller", 0, 0); - }: set_dummy (RawOrigin::Signed(caller), b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..10] range. - another_set_dummy { - let b in 1 .. 10; - let caller = account("caller", 0, 0); - }: set_dummy (RawOrigin::Signed(caller), b.into()) - - // This will measure the execution time of sorting a vector. - sort_vector { - let x in 0 .. 10000; - let mut m = Vec::::new(); - for i in 0..x { - m.push(i); + #[cfg(test)] + mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_accumulate_dummy::()); + assert_ok!(test_benchmark_set_dummy::()); + assert_ok!(test_benchmark_another_set_dummy::()); + assert_ok!(test_benchmark_sort_vector::()); + }); } - }: { - m.sort(); } } @@ -688,14 +712,17 @@ benchmarks!{ mod tests { use super::*; - use frame_support::{assert_ok, impl_outer_origin, parameter_types, weights::GetDispatchInfo}; + use frame_support::{ + assert_ok, impl_outer_origin, parameter_types, weights::{DispatchInfo, GetDispatchInfo}, + traits::{OnInitialize, OnFinalize} + }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. use sp_runtime::{ Perbill, testing::Header, - traits::{BlakeTwo256, OnInitialize, OnFinalize, IdentityLookup}, + traits::{BlakeTwo256, IdentityLookup}, }; impl_outer_origin! 
{ @@ -752,7 +779,7 @@ mod tests { // This function basically just builds a genesis storage key/value store according to // our desired mockup. - fn new_test_ext() -> sp_io::TestExternalities { + pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); // We use default for brevity, but you can configure as desired if needed. pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); @@ -802,13 +829,13 @@ mod tests { let info = DispatchInfo::default(); assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, info, 150) + WatchDummy::(PhantomData).validate(&1, &call, &info, 150) .unwrap() .priority, Bounded::max_value(), ); assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, info, 250), + WatchDummy::(PhantomData).validate(&1, &call, &info, 250), InvalidTransaction::ExhaustsResources.into(), ); }) diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 44751b0f3fc538445422bce5ff8be025630c68bc..3c494199cb2374e07a45b1b888d3af52545681b8 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-executive" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,21 +9,21 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME executives engine" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } [dev-dependencies] hex-literal = "0.2.1" -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-io ={ path = "../../primitives/io", version = "2.0.0-alpha.2"} -pallet-indices = { version = "2.0.0-alpha.2", path = "../indices" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } -pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../transaction-payment" } -sp-version = { version = "2.0.0-alpha.2", path = "../../primitives/version" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-io ={ path = "../../primitives/io", version = "2.0.0-alpha.5"} +pallet-indices = { version = "2.0.0-alpha.5", path = "../indices" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } +pallet-transaction-payment = { version = "2.0.0-alpha.5", path = "../transaction-payment" } +sp-version = { version = "2.0.0-alpha.5", path = "../../primitives/version" } [features] default = ["std"] @@ -35,3 +35,6 @@ std = [ "sp-runtime/std", "sp-std/std", ] + 
+[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index e81b883f00882a26cd0835c135f0608e793f2dbd..20c79fe4a5cac6b0529045c581ff4523952aab95 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -59,12 +59,14 @@ //! # pub type Balances = u64; //! # pub type AllModules = u64; //! # pub enum Runtime {}; -//! # use sp_runtime::transaction_validity::{TransactionValidity, UnknownTransaction}; +//! # use sp_runtime::transaction_validity::{ +//! TransactionValidity, UnknownTransaction, TransactionSource, +//! # }; //! # use sp_runtime::traits::ValidateUnsigned; //! # impl ValidateUnsigned for Runtime { //! # type Call = (); //! # -//! # fn validate_unsigned(_call: &Self::Call) -> TransactionValidity { +//! # fn validate_unsigned(_source: TransactionSource, _call: &Self::Call) -> TransactionValidity { //! # UnknownTransaction::NoUnsignedValidator.into() //! # } //! # } @@ -75,16 +77,18 @@ #![cfg_attr(not(feature = "std"), no_std)] use sp_std::{prelude::*, marker::PhantomData}; -use frame_support::{storage::StorageValue, weights::{GetDispatchInfo, WeighBlock, DispatchInfo}}; +use frame_support::{ + storage::StorageValue, weights::{GetDispatchInfo, DispatchInfo}, + traits::{OnInitialize, OnFinalize, OnRuntimeUpgrade, OffchainWorker}, +}; use sp_runtime::{ generic::Digest, ApplyExtrinsicResult, traits::{ - self, Header, Zero, One, Checkable, Applyable, CheckEqual, OnFinalize, OnInitialize, - NumberFor, Block as BlockT, OffchainWorker, Dispatchable, Saturating, OnRuntimeUpgrade, + self, Header, Zero, One, Checkable, Applyable, CheckEqual, ValidateUnsigned, NumberFor, + Block as BlockT, Dispatchable, Saturating, }, - transaction_validity::TransactionValidity, + transaction_validity::{TransactionValidity, TransactionSource}, }; -use sp_runtime::traits::ValidateUnsigned; use codec::{Codec, Encode}; use frame_system::{extrinsics_root, DigestOf}; @@ -111,15 +115,14 @@ impl< OnRuntimeUpgrade + OnInitialize + OnFinalize + - OffchainWorker + - WeighBlock, + OffchainWorker, > ExecuteBlock for Executive where Block::Extrinsic: Checkable + Codec, CheckedOf: - Applyable + + Applyable + GetDispatchInfo, - CallOf: Dispatchable, + CallOf: Dispatchable, OriginOf: From>, UnsignedValidator: ValidateUnsigned>, { @@ -137,15 +140,14 @@ impl< OnRuntimeUpgrade + OnInitialize + OnFinalize + - OffchainWorker + - WeighBlock, + OffchainWorker, > Executive where Block::Extrinsic: Checkable + Codec, CheckedOf: - Applyable + + Applyable + GetDispatchInfo, - CallOf: Dispatchable, + CallOf: Dispatchable, OriginOf: From>, UnsignedValidator: ValidateUnsigned>, { @@ -179,10 +181,8 @@ where if Self::runtime_upgraded() { // System is not part of `AllModules`, so we need to call this manually. 
as OnRuntimeUpgrade>::on_runtime_upgrade(); - ::on_runtime_upgrade(); - >::register_extra_weight_unchecked( - >::on_runtime_upgrade() - ); + let weight = ::on_runtime_upgrade(); + >::register_extra_weight_unchecked(weight); } >::initialize( block_number, @@ -191,13 +191,11 @@ where digest, frame_system::InitKind::Full, ); - >::on_initialize(*block_number); - >::register_extra_weight_unchecked( - >::on_initialize(*block_number) - ); - >::register_extra_weight_unchecked( - >::on_finalize(*block_number) - ); + as OnInitialize>::on_initialize(*block_number); + let weight = >::on_initialize(*block_number); + >::register_extra_weight_unchecked(weight); + + frame_system::Module::::note_finished_initialize(); } /// Returns if the runtime was upgraded since the last time this function was called. @@ -249,11 +247,11 @@ where /// Execute given extrinsics and take care of post-extrinsics book-keeping. fn execute_extrinsics_with_book_keeping(extrinsics: Vec, block_number: NumberFor) { - extrinsics.into_iter().for_each(Self::apply_extrinsic_no_note); // post-extrinsics book-keeping >::note_finished_extrinsics(); + as OnFinalize>::on_finalize(block_number); >::on_finalize(block_number); } @@ -261,7 +259,9 @@ where /// except state-root. pub fn finalize_block() -> System::Header { >::note_finished_extrinsics(); - >::on_finalize(>::block_number()); + let block_number = >::block_number(); + as OnFinalize>::on_finalize(block_number); + >::on_finalize(block_number); // set up extrinsics >::derive_extrinsics(); @@ -307,7 +307,7 @@ where // Decode parameters and dispatch let dispatch_info = xt.get_dispatch_info(); - let r = Applyable::apply::(xt, dispatch_info, encoded_len)?; + let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; >::note_applied_extrinsic(&r, encoded_len as u32, dispatch_info); @@ -340,12 +340,15 @@ where /// side-effects; it merely checks whether the transaction would panic if it were included or not. /// /// Changes made to storage should be discarded. - pub fn validate_transaction(uxt: Block::Extrinsic) -> TransactionValidity { + pub fn validate_transaction( + source: TransactionSource, + uxt: Block::Extrinsic, + ) -> TransactionValidity { let encoded_len = uxt.using_encoded(|d| d.len()); let xt = uxt.check(&Default::default())?; let dispatch_info = xt.get_dispatch_info(); - xt.validate::(dispatch_info, encoded_len) + xt.validate::(source, &dispatch_info, encoded_len) } /// Start an offchain worker and generate extrinsics. @@ -395,7 +398,7 @@ mod tests { use hex_literal::hex; mod custom { - use frame_support::weights::SimpleDispatchInfo; + use frame_support::weights::{SimpleDispatchInfo, Weight}; pub trait Trait: frame_system::Trait {} @@ -417,11 +420,11 @@ mod tests { // module hooks. 
// one with block number arg and one without - #[weight = SimpleDispatchInfo::FixedNormal(25)] - fn on_initialize(n: T::BlockNumber) { + fn on_initialize(n: T::BlockNumber) -> Weight { println!("on_initialize({})", n); + 175 } - #[weight = SimpleDispatchInfo::FixedNormal(150)] + fn on_finalize() { println!("on_finalize(?)"); } @@ -513,7 +516,10 @@ mod tests { Ok(()) } - fn validate_unsigned(call: &Self::Call) -> TransactionValidity { + fn validate_unsigned( + _source: TransactionSource, + call: &Self::Call, + ) -> TransactionValidity { match call { Call::Balances(BalancesCall::set_balance(_, _, _)) => Ok(Default::default()), _ => UnknownTransaction::NoUnsignedValidator.into(), @@ -537,7 +543,7 @@ mod tests { frame_system::CheckEra, frame_system::CheckNonce, frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment + pallet_transaction_payment::ChargeTransactionPayment, ); type AllModules = (System, Balances, Custom); type TestXt = sp_runtime::testing::TestXt; @@ -595,7 +601,7 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("e97d724f480f6e3215bd5c24b9ba51250e2514ac1c99e563fd77bfb9d6100b1c").into(), + state_root: hex!("489ae9b57a19bb4733a264dc64bbcae9b140a904657a681ed3bb5fbbe8cf412b").into(), extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), digest: Digest { logs: vec![], }, }, @@ -727,7 +733,10 @@ mod tests { let mut t = new_test_ext(1); t.execute_with(|| { - assert_eq!(Executive::validate_transaction(xt.clone()), Ok(Default::default())); + assert_eq!( + Executive::validate_transaction(TransactionSource::InBlock, xt.clone()), + Ok(Default::default()), + ); assert_eq!(Executive::apply_extrinsic(xt), Ok(Err(DispatchError::BadOrigin))); }); } diff --git a/frame/finality-tracker/Cargo.toml b/frame/finality-tracker/Cargo.toml index 1313080dfe0044ce3d0ee35ad7fcf895c071b4ab..e261fae05f5afd4bda7e69a0fc26e3d052890488 100644 --- a/frame/finality-tracker/Cargo.toml +++ b/frame/finality-tracker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-finality-tracker" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,18 +12,18 @@ documentation = "https://docs.rs/pallet-finality-tracker" [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/finality-tracker" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-finality-tracker = { version = 
"2.0.0-alpha.5", default-features = false, path = "../../primitives/finality-tracker" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } [features] default = ["std"] @@ -37,3 +37,6 @@ std = [ "sp-finality-tracker/std", "sp-inherents/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/finality-tracker/src/lib.rs b/frame/finality-tracker/src/lib.rs index 3b6de38de8e79ea025c6b6ecc0e89a70189c159a..8200543ffa17136173346a5bf0fb6f9ece5277e2 100644 --- a/frame/finality-tracker/src/lib.rs +++ b/frame/finality-tracker/src/lib.rs @@ -26,8 +26,6 @@ use frame_support::traits::Get; use frame_system::{ensure_none, Trait as SystemTrait}; use sp_finality_tracker::{INHERENT_IDENTIFIER, FinalizedInherentData}; -mod migration; - pub const DEFAULT_WINDOW_SIZE: u32 = 101; pub const DEFAULT_REPORT_LATENCY: u32 = 1000; @@ -78,6 +76,7 @@ decl_module! { /// Hint that the author of this block thinks the best finalized /// block is the given number. + #[weight = frame_support::weights::SimpleDispatchInfo::FixedMandatory(10_000)] fn final_hint(origin, #[compact] hint: T::BlockNumber) { ensure_none(origin)?; ensure!(!::Update::exists(), Error::::AlreadyUpdated); @@ -91,10 +90,6 @@ decl_module! { fn on_finalize() { Self::update_hint(::Update::take()) } - - fn on_runtime_upgrade() { - migration::on_runtime_upgrade::() - } } } @@ -213,9 +208,11 @@ mod tests { use sp_core::H256; use sp_runtime::{ testing::Header, Perbill, - traits::{BlakeTwo256, IdentityLookup, OnFinalize, Header as HeaderT}, + traits::{BlakeTwo256, IdentityLookup, Header as HeaderT}, + }; + use frame_support::{ + assert_ok, impl_outer_origin, parameter_types, weights::Weight, traits::OnFinalize }; - use frame_support::{assert_ok, impl_outer_origin, parameter_types, weights::Weight}; use frame_system as system; use std::cell::RefCell; diff --git a/frame/finality-tracker/src/migration.rs b/frame/finality-tracker/src/migration.rs deleted file mode 100644 index 1eff123db370e9ca8aac909074b5f9cb5859f032..0000000000000000000000000000000000000000 --- a/frame/finality-tracker/src/migration.rs +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -// Migration code to update storage. 
- -use super::*; -use frame_support::storage::migration::{put_storage_value, take_storage_value}; - -pub fn on_runtime_upgrade() { - change_name_timestamp_to_finality_tracker::() -} - -// Change the storage name used by this pallet from `Timestamp` to `FinalityTracker`. -// -// Since the format of the storage items themselves have not changed, we do not -// need to keep track of a storage version. If the runtime does not need to be -// upgraded, nothing here will happen anyway. - -fn change_name_timestamp_to_finality_tracker() { - sp_runtime::print("Migrating Finality Tracker."); - - if let Some(recent_hints) = take_storage_value::>(b"Timestamp", b"RecentHints", &[]) { - put_storage_value(b"FinalityTracker", b"RecentHints", &[], recent_hints); - } - - if let Some(ordered_hints) = take_storage_value::>(b"Timestamp", b"OrderedHints", &[]) { - put_storage_value(b"FinalityTracker", b"OrderedHints", &[], ordered_hints); - } - - if let Some(median) = take_storage_value::(b"Timestamp", b"Median", &[]) { - put_storage_value(b"FinalityTracker", b"Median", &[], median); - } - - if let Some(update) = take_storage_value::(b"Timestamp", b"Update", &[]) { - put_storage_value(b"FinalityTracker", b"Update", &[], update); - } - - if let Some(initialized) = take_storage_value::(b"Timestamp", b"Initialized", &[]) { - put_storage_value(b"FinalityTracker", b"Initialized", &[], initialized); - } -} diff --git a/frame/generic-asset/Cargo.toml b/frame/generic-asset/Cargo.toml index a8df92e3c61706c356117ff8c8403fe7b37ac8e0..b531a0ed9af24be72d5146a0d06ada267521a8e8 100644 --- a/frame/generic-asset/Cargo.toml +++ b/frame/generic-asset/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-generic-asset" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Centrality Developers "] edition = "2018" license = "GPL-3.0" @@ -10,15 +10,15 @@ description = "FRAME pallet for generic asset management" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-alpha.5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } [features] default = ["std"] @@ -30,3 +30,6 @@ std =[ "frame-support/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/generic-asset/src/lib.rs b/frame/generic-asset/src/lib.rs index 
f1713dd586a93a87b69565fdd1cb9bd9c38c1e02..b16666cb6b7e1c5dd96616a38035d141a70f371d 100644 --- a/frame/generic-asset/src/lib.rs +++ b/frame/generic-asset/src/lib.rs @@ -360,12 +360,14 @@ decl_module! { fn deposit_event() = default; /// Create a new kind of asset. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn create(origin, options: AssetOptions) -> DispatchResult { let origin = ensure_signed(origin)?; Self::create_asset(None, Some(origin), options) } /// Transfer some liquid free balance to another account. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn transfer(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, #[compact] amount: T::Balance) { let origin = ensure_signed(origin)?; ensure!(!amount.is_zero(), Error::::ZeroAmount); @@ -375,6 +377,7 @@ decl_module! { /// Updates permission for a given `asset_id` and an account. /// /// The `origin` must have `update` permission. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn update_permission( origin, #[compact] asset_id: T::AssetId, @@ -397,6 +400,7 @@ decl_module! { /// Mints an asset, increases its total issuance. /// The origin must have `mint` permissions. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn mint(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { let who = ensure_signed(origin)?; Self::mint_free(&asset_id, &who, &to, &amount)?; @@ -406,6 +410,7 @@ decl_module! { /// Burns an asset, decreases its total issuance. /// The `origin` must have `burn` permissions. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn burn(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { let who = ensure_signed(origin)?; Self::burn_free(&asset_id, &who, &to, &amount)?; @@ -415,6 +420,7 @@ decl_module! { /// Can be used to create reserved tokens. /// Requires Root call. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn create_reserved( origin, asset_id: T::AssetId, @@ -439,26 +445,26 @@ decl_storage! { pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig| { let issuance = config.initial_balance * (config.endowed_accounts.len() as u32).into(); config.assets.iter().map(|id| (id.clone(), issuance)).collect::>() - }): map hasher(blake2_256) T::AssetId => T::Balance; + }): map hasher(twox_64_concat) T::AssetId => T::Balance; /// The free balance of a given asset under an account. pub FreeBalance: - double_map hasher(blake2_256) T::AssetId, hasher(twox_128) T::AccountId => T::Balance; + double_map hasher(twox_64_concat) T::AssetId, hasher(blake2_128_concat) T::AccountId => T::Balance; /// The reserved balance of a given asset under an account. pub ReservedBalance: - double_map hasher(blake2_256) T::AssetId, hasher(twox_128) T::AccountId => T::Balance; + double_map hasher(twox_64_concat) T::AssetId, hasher(blake2_128_concat) T::AccountId => T::Balance; /// Next available ID for user-created asset. pub NextAssetId get(fn next_asset_id) config(): T::AssetId; /// Permission options for a given asset. pub Permissions get(fn get_permission): - map hasher(blake2_256) T::AssetId => PermissionVersions; + map hasher(twox_64_concat) T::AssetId => PermissionVersions; /// Any liquidity locks on some account balances. 
pub Locks get(fn locks): - map hasher(blake2_256) T::AccountId => Vec>; + map hasher(blake2_128_concat) T::AccountId => Vec>; /// The identity of the asset which is the one that is designated for the chain's staking system. pub StakingAssetId get(fn staking_asset_id) config(): T::AssetId; diff --git a/frame/generic-asset/src/mock.rs b/frame/generic-asset/src/mock.rs index 8db140d90c666359cf21092a3fc4c9f49217d1e9..c805b793bc7b4e358e8c8f2b4b5045a8c14dd360 100644 --- a/frame/generic-asset/src/mock.rs +++ b/frame/generic-asset/src/mock.rs @@ -127,16 +127,17 @@ impl ExtBuilder { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); GenesisConfig:: { - assets: vec![self.asset_id], - endowed_accounts: self.accounts, - initial_balance: self.initial_balance, - next_asset_id: self.next_asset_id, - staking_asset_id: 16000, - spending_asset_id: 16001, - } - .assimilate_storage(&mut t).unwrap(); - - t.into() + assets: vec![self.asset_id], + endowed_accounts: self.accounts, + initial_balance: self.initial_balance, + next_asset_id: self.next_asset_id, + staking_asset_id: 16000, + spending_asset_id: 16001, + }.assimilate_storage(&mut t).unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext } } diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index f5cce65fe906a55ca71bab5287b0f3ec69670d95..206b563bd9555c9f163d82473788a8baf3ecc275 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-grandpa" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,19 +10,19 @@ description = "FRAME pallet for GRANDPA finality gadget" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/finality-grandpa" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } -pallet-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../finality-tracker" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-finality-grandpa = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/finality-grandpa" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/staking" } +frame-support = { version 
= "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-alpha.5", default-features = false, path = "../session" } +pallet-finality-tracker = { version = "2.0.0-alpha.5", default-features = false, path = "../finality-tracker" } [dev-dependencies] -sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-io ={ version = "2.0.0-alpha.5", path = "../../primitives/io" } [features] default = ["std"] @@ -39,4 +39,6 @@ std = [ "pallet-session/std", "pallet-finality-tracker/std", ] -migrate-authorities = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 3210627f915229bce907c0296749c5620bd241b2..030699b52587262eef974a923fd4074a7fb043e3 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -34,7 +34,7 @@ use sp_std::prelude::*; use codec::{self as codec, Encode, Decode}; use frame_support::{decl_event, decl_storage, decl_module, decl_error, storage}; use sp_runtime::{ - DispatchResult, generic::{DigestItem, OpaqueDigestItemId}, traits::Zero, Perbill, PerThing, + DispatchResult, generic::{DigestItem, OpaqueDigestItemId}, traits::Zero, Perbill, }; use sp_staking::{ SessionIndex, @@ -151,13 +151,6 @@ decl_error! { decl_storage! { trait Store for Module as GrandpaFinality { - /// DEPRECATED - /// - /// This used to store the current authority set, which has been migrated to the well-known - /// GRANDPA_AUTHORITIES_KEY unhashed key. - #[cfg(feature = "migrate-authorities")] - pub(crate) Authorities get(fn authorities): AuthorityList; - /// State of the current authority set. State get(fn state): StoredState = StoredState::Live; @@ -174,8 +167,9 @@ decl_storage! { /// in the "set" of Grandpa validators from genesis. CurrentSetId get(fn current_set_id) build(|_| fg_primitives::SetId::default()): SetId; - /// A mapping from grandpa set ID to the index of the *most recent* session for which its members were responsible. - SetIdSession get(fn session_for_set): map hasher(blake2_256) SetId => Option; + /// A mapping from grandpa set ID to the index of the *most recent* session for which its + /// members were responsible. + SetIdSession get(fn session_for_set): map hasher(twox_64_concat) SetId => Option; } add_extra_genesis { config(authorities): AuthorityList; @@ -190,16 +184,12 @@ decl_module! { fn deposit_event() = default; /// Report some misbehavior. 
+ #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn report_misbehavior(origin, _report: Vec) { ensure_signed(origin)?; // FIXME: https://github.com/paritytech/substrate/issues/1112 } - fn on_initialize() { - #[cfg(feature = "migrate-authorities")] - Self::migrate_authorities(); - } - fn on_finalize(block_number: T::BlockNumber) { // check for scheduled pending authority set changes if let Some(pending_change) = >::get() { @@ -370,13 +360,6 @@ impl Module { Self::set_grandpa_authorities(authorities); } } - - #[cfg(feature = "migrate-authorities")] - fn migrate_authorities() { - if Authorities::exists() { - Self::set_grandpa_authorities(&Authorities::take()); - } - } } impl Module { diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index ff3841b8d4593dcb111492f830402c9a15eb5276..b583c31968d896e36e11811ff0193bea7a629ff9 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -18,7 +18,8 @@ #![cfg(test)] -use sp_runtime::{testing::{H256, Digest}, traits::{Header, OnFinalize}}; +use sp_runtime::{testing::{H256, Digest}, traits::Header}; +use frame_support::traits::OnFinalize; use crate::mock::*; use frame_system::{EventRecord, Phase}; use codec::{Decode, Encode}; @@ -318,21 +319,3 @@ fn time_slot_have_sane_ord() { ]; assert!(FIXTURE.windows(2).all(|f| f[0] < f[1])); } - -#[test] -#[cfg(feature = "migrate-authorities")] -fn authorities_migration() { - use sp_runtime::traits::OnInitialize; - - with_externalities(&mut new_test_ext(vec![]), || { - let authorities = to_authorities(vec![(1, 1), (2, 1), (3, 1)]); - - Authorities::put(authorities.clone()); - assert!(Grandpa::grandpa_authorities().is_empty()); - - Grandpa::on_initialize(1); - - assert!(!Authorities::exists()); - assert_eq!(Grandpa::grandpa_authorities(), authorities); - }); -} diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index 3dbffedffcdc3ea332807f8542989e04121c92f4..22b385d06d76f36677a50b2afcbe51ad564e970f 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-identity" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,18 +10,18 @@ description = "FRAME identity management pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = 
"2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } [features] default = ["std"] @@ -36,3 +36,6 @@ std = [ "frame-system/std", ] runtime-benchmarks = ["frame-benchmarking"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index 87d577c4d8e88b420760924812f18a9238c5d8f0..fe99cd990720cf0daeb7b5c98fb4c2f0e6556c43 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -16,6 +16,8 @@ //! Identity pallet benchmarking. +#![cfg(feature = "runtime-benchmarks")] + use super::*; use frame_system::RawOrigin; @@ -53,7 +55,7 @@ fn add_registrars(r: u32) -> Result<(), &'static str> { // Adds `s` sub-accounts to the identity of `who`. Each wil have 32 bytes of raw data added to it. // This additionally returns the vector of sub-accounts to it can be modified if needed. -fn add_sub_accounts(who: T::AccountId, s: u32) -> Result, &'static str> { +fn add_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { let mut subs = Vec::new(); let who_origin = RawOrigin::Signed(who.clone()); let data = Data::Raw(vec![0; 32]); @@ -100,7 +102,7 @@ benchmarks! { let s in 1 .. T::MaxSubAccounts::get() => { // Give them s many sub accounts let caller = account::("caller", 0); - let _ = add_sub_accounts::(caller, s)?; + let _ = add_sub_accounts::(&caller, s)?; }; let x in 1 .. T::MaxAdditionalFields::get() => { // Create their main identity with x additional fields @@ -149,11 +151,19 @@ benchmarks! { ) set_subs { - let s in ...; - let caller = account::("caller", 0); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); - let subs = Module::::subs(&caller); + + // Give them s many sub accounts. + let s in 1 .. T::MaxSubAccounts::get() - 1 => { + let _ = add_sub_accounts::(&caller, s)?; + }; + + let mut subs = Module::::subs(&caller); + + // Create an s + 1 sub account. + let data = Data::Raw(vec![0; 32]); + subs.push((account::("sub", s + 1), data)); + }: _(RawOrigin::Signed(caller), subs) clear_identity { @@ -270,3 +280,27 @@ benchmarks! 
{ } }: _(RawOrigin::Root, caller_lookup) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_add_registrar::()); + assert_ok!(test_benchmark_set_identity::()); + assert_ok!(test_benchmark_set_subs::()); + assert_ok!(test_benchmark_clear_identity::()); + assert_ok!(test_benchmark_request_judgement::()); + assert_ok!(test_benchmark_cancel_request::()); + assert_ok!(test_benchmark_set_fee::()); + assert_ok!(test_benchmark_set_account_id::()); + assert_ok!(test_benchmark_set_fields::()); + assert_ok!(test_benchmark_provide_judgement::()); + assert_ok!(test_benchmark_kill_identity::()); + }); + } +} diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index e993a0bebcdbdb449c0387b0c8a2558c524188a8..2a2d1c9cf8404d68a956d1d9fe098bfc2cc8f042 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -70,17 +70,15 @@ use sp_std::{fmt::Debug, ops::Add, iter::once}; use enumflags2::BitFlags; use codec::{Encode, Decode}; use sp_runtime::{DispatchResult, RuntimeDebug}; -use sp_runtime::traits::{StaticLookup, EnsureOrigin, Zero, AppendZerosInput}; +use sp_runtime::traits::{StaticLookup, Zero, AppendZerosInput}; use frame_support::{ decl_module, decl_event, decl_storage, ensure, decl_error, - traits::{Currency, ReservableCurrency, OnUnbalanced, Get, BalanceStatus}, + traits::{Currency, ReservableCurrency, OnUnbalanced, Get, BalanceStatus, EnsureOrigin}, weights::SimpleDispatchInfo, }; use frame_system::{self as system, ensure_signed, ensure_root}; -#[cfg(feature = "runtime-benchmarks")] -pub mod benchmarking; -mod migration; +mod benchmarking; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; @@ -386,18 +384,18 @@ decl_storage! { trait Store for Module as Identity { /// Information that is pertinent to identify the entity behind an account. pub IdentityOf get(fn identity): - map hasher(blake2_256) T::AccountId => Option>>; + map hasher(twox_64_concat) T::AccountId => Option>>; /// The super-identity of an alternative "sub" identity together with its name, within that /// context. If the account is not some other account's sub-identity, then just `None`. pub SuperOf get(fn super_of): - map hasher(blake2_256) T::AccountId => Option<(T::AccountId, Data)>; + map hasher(blake2_128_concat) T::AccountId => Option<(T::AccountId, Data)>; /// Alternative "sub" identities of this account. /// /// The first item is the deposit, the second is a vector of the accounts. pub SubsOf get(fn subs_of): - map hasher(blake2_256) T::AccountId => (BalanceOf, Vec); + map hasher(twox_64_concat) T::AccountId => (BalanceOf, Vec); /// The set of registrars. Not expected to get very big as can only be added through a /// special origin (likely a council motion). @@ -874,10 +872,6 @@ decl_module! { Self::deposit_event(RawEvent::IdentityKilled(target, deposit)); } - - fn on_runtime_upgrade() { - migration::on_runtime_upgrade::() - } } } @@ -983,7 +977,7 @@ mod tests { // This function basically just builds a genesis storage key/value store according to // our desired mockup. - fn new_test_ext() -> sp_io::TestExternalities { + pub fn new_test_ext() -> sp_io::TestExternalities { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); // We use default for brevity, but you can configure as desired if needed. 
pallet_balances::GenesisConfig:: { diff --git a/frame/identity/src/migration.rs b/frame/identity/src/migration.rs deleted file mode 100644 index e312d9e04f239ab33e034f3ff03b0939d1a479d0..0000000000000000000000000000000000000000 --- a/frame/identity/src/migration.rs +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Migration code to update storage. - -use super::*; -use frame_support::storage::migration::{put_storage_value, take_storage_value, StorageIterator}; - -pub fn on_runtime_upgrade() { - change_name_sudo_to_identity::() -} - -// Change the storage name used by this pallet from `Sudo` to `Identity`. -// -// Since the format of the storage items themselves have not changed, we do not -// need to keep track of a storage version. If the runtime does not need to be -// upgraded, nothing here will happen anyway. - -fn change_name_sudo_to_identity() { - sp_runtime::print("Migrating Identity."); - - for (hash, identity_of) in StorageIterator::>>::new(b"Sudo", b"IdentityOf").drain() { - put_storage_value(b"Identity", b"IdentityOf", &hash, identity_of); - } - - for (hash, super_of) in StorageIterator::<(T::AccountId, Data)>::new(b"Sudo", b"SuperOf").drain() { - put_storage_value(b"Identity", b"SuperOf", &hash, super_of); - } - - for (hash, subs_of) in StorageIterator::<(BalanceOf, Vec)>::new(b"Sudo", b"SubsOf").drain() { - put_storage_value(b"Identity", b"SubsOf", &hash, subs_of); - } - - if let Some(registrars) = take_storage_value::, T::AccountId>>>>(b"Sudo", b"Registrars", &[]) { - put_storage_value(b"Identity", b"Registrars", &[], registrars); - } - - sp_runtime::print("Done Identity."); -} diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index ab8c066945480dee7feaf11fb14873e71695bfa0..dabcc45ef166af7fdf5c47e6a86bbc41732017a0 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-im-online" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,18 +9,20 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME's I'm online pallet" [dependencies] -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } -pallet-authorship = { version = "2.0.0-alpha.2", default-features = false, path = "../authorship" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/application-crypto" } +pallet-authorship = { version = 
"2.0.0-alpha.5", default-features = false, path = "../authorship" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-alpha.5", default-features = false, path = "../session" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } + +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } [features] default = ["std", "pallet-session/historical"] @@ -38,3 +40,7 @@ std = [ "frame-support/std", "frame-system/std", ] +runtime-benchmarks = ["frame-benchmarking"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..e0e74bccfa35da9a7954daa266014ae671fc5a18 --- /dev/null +++ b/frame/im-online/src/benchmarking.rs @@ -0,0 +1,93 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! I'm Online pallet benchmarking. 
+ +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_system::RawOrigin; +use frame_benchmarking::benchmarks; +use sp_core::offchain::{OpaquePeerId, OpaqueMultiaddr}; +use sp_runtime::traits::{ValidateUnsigned, Zero}; +use sp_runtime::transaction_validity::TransactionSource; + +use crate::Module as ImOnline; + +const MAX_KEYS: u32 = 1000; +const MAX_EXTERNAL_ADDRESSES: u32 = 100; + +pub fn create_heartbeat(k: u32, e: u32) -> + Result<(crate::Heartbeat, ::Signature), &'static str> +{ + let mut keys = Vec::new(); + for _ in 0..k { + keys.push(T::AuthorityId::generate_pair(None)); + } + Keys::::put(keys.clone()); + + let network_state = OpaqueNetworkState { + peer_id: OpaquePeerId::default(), + external_addresses: vec![OpaqueMultiaddr::new(vec![0; 32]); e as usize], + }; + let input_heartbeat = Heartbeat { + block_number: T::BlockNumber::zero(), + network_state, + session_index: 0, + authority_index: k-1, + }; + + let encoded_heartbeat = input_heartbeat.encode(); + let authority_id = keys.get((k-1) as usize).ok_or("out of range")?; + let signature = authority_id.sign(&encoded_heartbeat).ok_or("couldn't make signature")?; + + Ok((input_heartbeat, signature)) +} + +benchmarks! { + _{ } + + heartbeat { + let k in 1 .. MAX_KEYS; + let e in 1 .. MAX_EXTERNAL_ADDRESSES; + let (input_heartbeat, signature) = create_heartbeat::(k, e)?; + }: _(RawOrigin::None, input_heartbeat, signature) + + validate_unsigned { + let k in 1 .. MAX_KEYS; + let e in 1 .. MAX_EXTERNAL_ADDRESSES; + let (input_heartbeat, signature) = create_heartbeat::(k, e)?; + let call = Call::heartbeat(input_heartbeat, signature); + }: { + ImOnline::::validate_unsigned(TransactionSource::InBlock, &call)?; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{new_test_ext, Runtime}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_heartbeat::()); + assert_ok!(test_benchmark_validate_unsigned::()); + }); + } +} diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 9aa8d2d67f6feb6d1ef5c0d18b2f30280d60b678..9c2b55a5c037e4833722253b0ee9bab775d5ce86 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -50,6 +50,7 @@ //! //! decl_module! { //! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = frame_support::weights::SimpleDispatchInfo::default()] //! pub fn is_online(origin, authority_index: u32) -> dispatch::DispatchResult { //! let _sender = ensure_signed(origin)?; //! let _is_online = >::is_online(authority_index); @@ -69,6 +70,7 @@ mod mock; mod tests; +mod benchmarking; use sp_application_crypto::RuntimeAppPublic; use codec::{Encode, Decode}; @@ -79,9 +81,9 @@ use pallet_session::historical::IdentificationTuple; use sp_runtime::{ offchain::storage::StorageValueRef, RuntimeDebug, - traits::{Convert, Member, Saturating, AtLeast32Bit}, Perbill, PerThing, + traits::{Convert, Member, Saturating, AtLeast32Bit}, Perbill, transaction_validity::{ - TransactionValidity, ValidTransaction, InvalidTransaction, + TransactionValidity, ValidTransaction, InvalidTransaction, TransactionSource, TransactionPriority, }, }; @@ -245,6 +247,12 @@ pub trait Trait: frame_system::Trait + pallet_session::historical::Trait { IdentificationTuple, UnresponsivenessOffence>, >; + + /// A configuration for base priority of unsigned transactions. + /// + /// This is exposed so that it can be tuned for particular runtime, when + /// multiple pallets send unsigned transactions. 
+ type UnsignedPriority: Get; } decl_event!( @@ -274,16 +282,17 @@ decl_storage! { /// The current set of keys that may issue a heartbeat. Keys get(fn keys): Vec; - /// For each session index, we keep a mapping of `AuthIndex` - /// to `offchain::OpaqueNetworkState`. + /// For each session index, we keep a mapping of `AuthIndex` to + /// `offchain::OpaqueNetworkState`. ReceivedHeartbeats get(fn received_heartbeats): - double_map hasher(blake2_256) SessionIndex, hasher(blake2_256) AuthIndex + double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) AuthIndex => Option>; /// For each session index, we keep a mapping of `T::ValidatorId` to the /// number of blocks authored by the given authority. AuthoredBlocks get(fn authored_blocks): - double_map hasher(blake2_256) SessionIndex, hasher(blake2_256) T::ValidatorId => u32; + double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) T::ValidatorId + => u32; } add_extra_genesis { config(keys): Vec; @@ -307,6 +316,7 @@ decl_module! { fn deposit_event() = default; + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn heartbeat( origin, heartbeat: Heartbeat, @@ -622,7 +632,10 @@ impl pallet_session::OneSessionHandler for Module { impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; - fn validate_unsigned(call: &Self::Call) -> TransactionValidity { + fn validate_unsigned( + _source: TransactionSource, + call: &Self::Call, + ) -> TransactionValidity { if let Call::heartbeat(heartbeat, signature) = call { if >::is_online(heartbeat.authority_index) { // we already received a heartbeat for this authority @@ -651,13 +664,14 @@ impl frame_support::unsigned::ValidateUnsigned for Module { return InvalidTransaction::BadProof.into(); } - Ok(ValidTransaction { - priority: TransactionPriority::max_value(), - requires: vec![], - provides: vec![(current_session, authority_id).encode()], - longevity: TryInto::::try_into(T::SessionDuration::get() / 2.into()).unwrap_or(64_u64), - propagate: true, - }) + ValidTransaction::with_tag_prefix("ImOnline") + .priority(T::UnsignedPriority::get()) + .and_provides((current_session, authority_id)) + .longevity(TryInto::::try_into( + T::SessionDuration::get() / 2.into() + ).unwrap_or(64_u64)) + .propagate(true) + .build() } else { InvalidTransaction::Call.into() } @@ -672,11 +686,11 @@ pub struct UnresponsivenessOffence { /// /// It acts as a time measure for unresponsiveness reports and effectively will always point /// at the end of the session. - session_index: SessionIndex, + pub session_index: SessionIndex, /// The size of the validator set in current session/era. - validator_set_count: u32, + pub validator_set_count: u32, /// Authorities that were unresponsive during the current era. - offenders: Vec, + pub offenders: Vec, } impl Offence for UnresponsivenessOffence { diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 78b6409d543eb366ca77e5b94a1cea2ac92973cb..73ccaf3f707cbc7f37f7902dd1ba11d7e4e73296 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -141,6 +141,7 @@ impl pallet_session::Trait for Runtime { type Keys = UintAuthorityId; type Event = (); type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = pallet_session::PeriodicSessions; } impl pallet_session::historical::Trait for Runtime { @@ -159,6 +160,10 @@ impl pallet_authorship::Trait for Runtime { type EventHandler = ImOnline; } +parameter_types! 
{ + pub const UnsignedPriority: u64 = 1 << 20; +} + impl Trait for Runtime { type AuthorityId = UintAuthorityId; type Event = (); @@ -166,6 +171,7 @@ impl Trait for Runtime { type SubmitTransaction = SubmitTransaction; type ReportUnresponsiveness = OffenceHandler; type SessionDuration = Period; + type UnsignedPriority = UnsignedPriority; } /// Im Online module. @@ -174,7 +180,7 @@ pub type System = frame_system::Module; pub type Session = pallet_session::Module; pub fn advance_session() { - let now = System::block_number(); + let now = System::block_number().max(1); System::set_block_number(now + 1); Session::rotate_session(); assert_eq!(Session::current_index(), (now / Period::get()) as u32); diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index b43adca0fd485d36b5c8d2f8d81ce365d8518711..c7bf2afcca629f74765c1519abdd063e63dae1f1 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -190,7 +190,7 @@ fn late_heartbeat_should_fail() { #[test] fn should_generate_heartbeats() { - use sp_runtime::traits::OffchainWorker; + use frame_support::traits::OffchainWorker; let mut ext = new_test_ext(); let (offchain, _state) = TestOffchainExt::new(); diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index d7e01765b59d165a2f1f8d52e44e1e3b61ee18e4..f28f393642bedac28e1644f35046d6ad9c1b4a2e 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-indices" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,17 +10,17 @@ description = "FRAME indices management pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-keyring = { version = "2.0.0-alpha.2", optional = true, path = "../../primitives/keyring" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-keyring = { version = "2.0.0-alpha.5", optional = true, path = "../../primitives/keyring" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } [features] default = ["std"] @@ -35,3 +35,6 @@ std = [ "sp-runtime/std", 
"frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index 95ac6cf75283816bb3058f148d849f365de1345d..d2ba664d425ffce1ef0aac0745fea825d7183a42 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -25,6 +25,7 @@ use sp_runtime::traits::{ StaticLookup, Member, LookupError, Zero, One, BlakeTwo256, Hash, Saturating, AtLeast32Bit }; use frame_support::{Parameter, decl_module, decl_error, decl_event, decl_storage, ensure}; +use frame_support::weights::{Weight, SimpleDispatchInfo, WeighData}; use frame_support::dispatch::DispatchResult; use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Reserved}; use frame_support::storage::migration::take_storage_value; @@ -98,8 +99,10 @@ decl_module! { pub struct Module for enum Call where origin: T::Origin, system = frame_system { fn deposit_event() = default; - fn on_initialize() { + fn on_initialize() -> Weight { Self::migrations(); + + SimpleDispatchInfo::default().weigh_data(()) } /// Assign an previously unassigned index. @@ -118,6 +121,7 @@ decl_module! { /// - One reserve operation. /// - One event. /// # + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn claim(origin, index: T::AccountIndex) { let who = ensure_signed(origin)?; @@ -145,6 +149,7 @@ decl_module! { /// - One transfer operation. /// - One event. /// # + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn transfer(origin, new: T::AccountId, index: T::AccountIndex) { let who = ensure_signed(origin)?; ensure!(who != new, Error::::NotTransfer); @@ -175,6 +180,7 @@ decl_module! { /// - One reserve operation. /// - One event. /// # + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn free(origin, index: T::AccountIndex) { let who = ensure_signed(origin)?; @@ -203,6 +209,7 @@ decl_module! { /// - Up to one reserve operation. /// - One event. 
/// # + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn force_transfer(origin, new: T::AccountId, index: T::AccountIndex) { ensure_root(origin)?; diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index b54109083dc0d524c127668573aea89c02b14c87..41e56b584f550332fe1b91f5763fa31765086d75 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-membership" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,15 +10,15 @@ description = "FRAME membership management pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } [features] default = ["std"] @@ -31,3 +31,6 @@ std = [ "frame-support/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 129f3c4003bdd306a8feedcd4f2d39ffb6ddfa72..8f086fa2f32114a6b2e17bc8d5f228cfee070d08 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -25,11 +25,10 @@ use sp_std::prelude::*; use frame_support::{ decl_module, decl_storage, decl_event, decl_error, - traits::{ChangeMembers, InitializeMembers}, + traits::{ChangeMembers, InitializeMembers, EnsureOrigin}, weights::SimpleDispatchInfo, }; use frame_system::{self as system, ensure_root, ensure_signed}; -use sp_runtime::traits::EnsureOrigin; pub trait Trait: frame_system::Trait { /// The overarching event type. diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml index b3d333f369fbd901c5fa2d597fe3bedb063462ea..f965e1dddcb09687f4cdc193c62f09e60f4d12e5 100644 --- a/frame/metadata/Cargo.toml +++ b/frame/metadata/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-metadata" -version = "11.0.0-alpha.3" +version = "11.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,10 +9,10 @@ repository = "https://github.com/paritytech/substrate/" description = "Decodable variant of the RuntimeMetadata." 
[dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } [features] default = ["std"] @@ -22,3 +22,6 @@ std = [ "sp-core/std", "serde", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/metadata/src/lib.rs b/frame/metadata/src/lib.rs index 5ecb9f3443876ddb21f8ceb440e9505c6bc6773e..bec69999b2187338ea48c5f57941105c70fbc691 100644 --- a/frame/metadata/src/lib.rs +++ b/frame/metadata/src/lib.rs @@ -277,6 +277,7 @@ pub enum StorageHasher { Twox128, Twox256, Twox64Concat, + Identity, } /// A storage entry type. @@ -288,7 +289,8 @@ pub enum StorageEntryType { hasher: StorageHasher, key: DecodeDifferentStr, value: DecodeDifferentStr, - is_linked: bool, + // is_linked flag previously, unused now to keep backwards compat + unused: bool, }, DoubleMap { hasher: StorageHasher, diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 07c84c439ffaa57dedb4e8a2da8647e9d02460df..ea88021d2528b8897c02a721ffc6fcf64564358d 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-nicks" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,16 +10,16 @@ description = "FRAME pallet for nick management" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } [features] default = ["std"] @@ -32,3 +32,6 @@ std = [ 
"frame-support/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 4fa94b575fac18aeb629f9b1af69b4383cabe62e..ae005e2500b85ab3da72aa94746681d1eaa01f3a 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -40,11 +40,11 @@ use sp_std::prelude::*; use sp_runtime::{ - traits::{StaticLookup, EnsureOrigin, Zero} + traits::{StaticLookup, Zero} }; use frame_support::{ decl_module, decl_event, decl_storage, ensure, decl_error, - traits::{Currency, ReservableCurrency, OnUnbalanced, Get}, + traits::{Currency, EnsureOrigin, ReservableCurrency, OnUnbalanced, Get}, weights::SimpleDispatchInfo, }; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -78,7 +78,7 @@ pub trait Trait: frame_system::Trait { decl_storage! { trait Store for Module as Nicks { /// The lookup table for names. - NameOf: map hasher(blake2_256) T::AccountId => Option<(Vec, BalanceOf)>; + NameOf: map hasher(twox_64_concat) T::AccountId => Option<(Vec, BalanceOf)>; } } @@ -171,6 +171,7 @@ decl_module! { /// - One storage read/write. /// - One event. /// # + #[weight = SimpleDispatchInfo::FixedNormal(70_000)] fn clear_name(origin) { let sender = ensure_signed(origin)?; diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index 99c52e40885378f8eccee76a75887ef216fdf38f..eab95dbd048e16054065aba215e726144836cdd7 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-offences" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,18 +9,18 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME offences pallet" [dependencies] -pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../balances" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +pallet-balances = { version = "2.0.0-alpha.5", default-features = false, path = "../balances" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-io = { version = "2.0.0-alpha.5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.5", path = 
"../../primitives/core" } [features] default = ["std"] @@ -34,3 +34,7 @@ std = [ "frame-support/std", "frame-system/std", ] +runtime-benchmarks = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/offences/benchmarking/Cargo.toml b/frame/offences/benchmarking/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..e343f1ff0c1ee18cf8d7bb6d73e8c5b535d80d35 --- /dev/null +++ b/frame/offences/benchmarking/Cargo.toml @@ -0,0 +1,43 @@ +[package] +name = "pallet-offences-benchmarking" +version = "2.0.0-alpha.5" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME offences pallet benchmarking" + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } + +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/std" } +sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/staking" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../../benchmarking" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../../system" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../../support" } +pallet-im-online = { version = "2.0.0-alpha.5", default-features = false, path = "../../im-online" } +pallet-offences = { version = "2.0.0-alpha.5", default-features = false, features = ["runtime-benchmarks"], path = "../../offences" } +pallet-staking = { version = "2.0.0-alpha.5", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +pallet-session = { version = "2.0.0-alpha.5", default-features = false, path = "../../session" } +sp-io = { path = "../../../primitives/io", default-features = false, version = "2.0.0-alpha.5"} + + +[features] +default = ["std"] +std = [ + "sp-runtime/std", + "sp-std/std", + "sp-staking/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "pallet-offences/std", + "pallet-im-online/std", + "pallet-staking/std", + "pallet-session/std", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..a88714a89a7faf82176d0e1feed568db42f8b285 --- /dev/null +++ b/frame/offences/benchmarking/src/lib.rs @@ -0,0 +1,175 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Offences pallet benchmarking. 
+ +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_std::prelude::*; +use sp_std::vec; + +use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, account}; +use frame_support::traits::{Currency, OnInitialize}; + +use sp_runtime::{Perbill, traits::{Convert, StaticLookup}}; +use sp_staking::offence::ReportOffence; + +use pallet_im_online::{Trait as ImOnlineTrait, Module as ImOnline, UnresponsivenessOffence}; +use pallet_offences::{Trait as OffencesTrait, Module as Offences}; +use pallet_staking::{ + Module as Staking, Trait as StakingTrait, RewardDestination, ValidatorPrefs, + Exposure, IndividualExposure, ElectionStatus +}; +use pallet_session::Trait as SessionTrait; +use pallet_session::historical::{Trait as HistoricalTrait, IdentificationTuple}; + +const SEED: u32 = 0; + +const MAX_USERS: u32 = 1000; +const MAX_REPORTERS: u32 = 100; +const MAX_OFFENDERS: u32 = 100; +const MAX_NOMINATORS: u32 = 100; +const MAX_DEFERRED_OFFENCES: u32 = 100; + +pub struct Module(Offences); + +pub trait Trait: SessionTrait + StakingTrait + OffencesTrait + ImOnlineTrait + HistoricalTrait {} + +fn create_offender(n: u32, nominators: u32) -> Result { + let stash: T::AccountId = account("stash", n, SEED); + let controller: T::AccountId = account("controller", n, SEED); + let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); + let reward_destination = RewardDestination::Staked; + let amount = T::Currency::minimum_balance(); + + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + controller_lookup.clone(), + amount.clone(), + reward_destination.clone(), + )?; + + let validator_prefs = ValidatorPrefs { + commission: Perbill::from_percent(50), + }; + Staking::::validate(RawOrigin::Signed(controller.clone()).into(), validator_prefs)?; + + let mut individual_exposures = vec![]; + + // Create n nominators + for i in 0 .. nominators { + let nominator_stash: T::AccountId = account("nominator stash", n * MAX_NOMINATORS + i, SEED); + let nominator_controller: T::AccountId = account("nominator controller", n * MAX_NOMINATORS + i, SEED); + let nominator_controller_lookup: ::Source = T::Lookup::unlookup(nominator_controller.clone()); + + Staking::::bond( + RawOrigin::Signed(nominator_stash.clone()).into(), + nominator_controller_lookup.clone(), + amount, + reward_destination, + )?; + + let selected_validators: Vec<::Source> = vec![controller_lookup.clone()]; + Staking::::nominate(RawOrigin::Signed(nominator_controller.clone()).into(), selected_validators)?; + + individual_exposures.push(IndividualExposure { + who: nominator_controller.clone(), + value: amount.clone(), + }); + } + + let exposure = Exposure { + total: amount.clone() * n.into(), + own: amount, + others: individual_exposures, + }; + let current_era = 0u32; + Staking::::add_era_stakers(current_era.into(), stash.clone().into(), exposure); + + Ok(controller) +} + +fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result>, &'static str> { + let mut offenders: Vec = vec![]; + + for i in 0 .. num_offenders { + let offender = create_offender::(i, num_nominators)?; + offenders.push(offender); + } + + Ok(offenders.iter() + .map(|id| + ::ValidatorIdOf::convert(id.clone()) + .expect("failed to get validator id from account id")) + .map(|validator_id| + ::FullIdentificationOf::convert(validator_id.clone()) + .map(|full_id| (validator_id, full_id)) + .expect("failed to convert validator id to full identification")) + .collect::>>()) +} + +benchmarks! { + _ { + let u in 1 .. MAX_USERS => (); + let r in 1 .. 
MAX_REPORTERS => (); + let o in 1 .. MAX_OFFENDERS => (); + let n in 1 .. MAX_NOMINATORS => (); + let d in 1 .. MAX_DEFERRED_OFFENCES => (); + } + + report_offence { + let r in ...; + let o in ...; + let n in ...; + + let mut reporters = vec![]; + + for i in 0 .. r { + let reporter = account("reporter", i, SEED); + reporters.push(reporter); + } + + let offenders = make_offenders::(o, n).expect("failed to create offenders"); + let keys = ImOnline::::keys(); + + let offence = UnresponsivenessOffence { + session_index: 0, + validator_set_count: keys.len() as u32, + offenders, + }; + + }: { + let _ = ::ReportUnresponsiveness::report_offence(reporters, offence); + } + + on_initialize { + let d in ...; + + Staking::::put_election_status(ElectionStatus::Closed); + + let mut deferred_offences = vec![]; + + for i in 0 .. d { + deferred_offences.push((vec![], vec![], 0u32)); + } + + Offences::::set_deferred_offences(deferred_offences); + + }: { + Offences::::on_initialize(u.into()); + } +} diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 27983cbb5332e57e95724999338e850dfd77f7c9..40f39ab5f2a0b3519a9878eae5e9b1d5568c2d7e 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -26,10 +26,12 @@ mod tests; use sp_std::vec::Vec; use frame_support::{ - decl_module, decl_event, decl_storage, Parameter, + decl_module, decl_event, decl_storage, Parameter, debug, + weights::{Weight, SimpleDispatchInfo, WeighData}, }; -use sp_runtime::traits::Hash; +use sp_runtime::{traits::Hash, Perbill}; use sp_staking::{ + SessionIndex, offence::{Offence, ReportOffence, Kind, OnOffenceHandler, OffenceDetails, OffenceError}, }; use codec::{Encode, Decode}; @@ -41,6 +43,13 @@ type OpaqueTimeSlot = Vec; /// A type alias for a report identifier. type ReportIdOf = ::Hash; +/// Type of data stored as a deferred offence +pub type DeferredOffenceOf = ( + Vec::AccountId, ::IdentificationTuple>>, + Vec, + SessionIndex, +); + /// Offences trait pub trait Trait: frame_system::Trait { /// The overarching event type. @@ -54,11 +63,17 @@ pub trait Trait: frame_system::Trait { decl_storage! { trait Store for Module as Offences { /// The primary structure that holds all offence records keyed by report identifiers. - Reports get(fn reports): map hasher(blake2_256) ReportIdOf => Option>; + Reports get(fn reports): + map hasher(twox_64_concat) ReportIdOf + => Option>; + + /// Deferred reports that have been rejected by the offence handler and need to be submitted + /// at a later time. + DeferredOffences get(deferred_offences): Vec>; /// A vector of reports of the same kind that happened at the same time slot. ConcurrentReportsIndex: - double_map hasher(blake2_256) Kind, hasher(blake2_256) OpaqueTimeSlot + double_map hasher(twox_64_concat) Kind, hasher(twox_64_concat) OpaqueTimeSlot => Vec>; /// Enumerates all reports of a kind along with the time they happened. @@ -67,24 +82,54 @@ decl_storage! { /// /// Note that the actual type of this mapping is `Vec`, this is because values of /// different types are not supported at the moment so we are doing the manual serialization. - ReportsByKindIndex: map hasher(blake2_256) Kind => Vec; // (O::TimeSlot, ReportIdOf) + ReportsByKindIndex: map hasher(twox_64_concat) Kind => Vec; // (O::TimeSlot, ReportIdOf) } } decl_event!( pub enum Event { /// There is an offence reported of the given `kind` happened at the `session_index` and - /// (kind-specific) time slot. This event is not deposited for duplicate slashes. 
- Offence(Kind, OpaqueTimeSlot), + /// (kind-specific) time slot. This event is not deposited for duplicate slashes. last + /// element indicates of the offence was applied (true) or queued (false). + Offence(Kind, OpaqueTimeSlot, bool), } ); decl_module! { - /// Offences module, currently just responsible for taking offence reports. pub struct Module for enum Call where origin: T::Origin { fn deposit_event() = default; + + fn on_runtime_upgrade() -> Weight { + Reports::::remove_all(); + ConcurrentReportsIndex::::remove_all(); + ReportsByKindIndex::remove_all(); + + SimpleDispatchInfo::default().weigh_data(()) + } + + fn on_initialize(now: T::BlockNumber) -> Weight { + // only decode storage if we can actually submit anything again. + if T::OnOffenceHandler::can_report() { + >::mutate(|deferred| { + // keep those that fail to be reported again. An error log is emitted here; this + // should not happen if staking's `can_report` is implemented properly. + deferred.retain(|(o, p, s)| { + T::OnOffenceHandler::on_offence(&o, &p, *s).map_err(|_| { + debug::native::error!( + target: "pallet-offences", + "re-submitting a deferred slash returned Err at {}. This should not happen with pallet-staking", + now, + ); + }).is_err() + }) + }) + } + + SimpleDispatchInfo::default().weigh_data(()) + } } } + impl> ReportOffence for Module where @@ -107,9 +152,6 @@ where None => return Err(OffenceError::DuplicateReport), }; - // Deposit the event. - Self::deposit_event(Event::Offence(O::ID, time_slot.encode())); - let offenders_count = concurrent_offenders.len() as u32; // The amount new offenders are slashed @@ -118,17 +160,42 @@ where let slash_perbill: Vec<_> = (0..concurrent_offenders.len()) .map(|_| new_fraction.clone()).collect(); - T::OnOffenceHandler::on_offence( + let applied = Self::report_or_store_offence( &concurrent_offenders, &slash_perbill, offence.session_index(), ); + // Deposit the event. + Self::deposit_event(Event::Offence(O::ID, time_slot.encode(), applied)); + Ok(()) } } impl Module { + /// Tries (without checking) to report an offence. Stores them in [`DeferredOffences`] in case + /// it fails. Returns false in case it has to store the offence. + fn report_or_store_offence( + concurrent_offenders: &[OffenceDetails], + slash_perbill: &[Perbill], + session_index: SessionIndex, + ) -> bool { + match T::OnOffenceHandler::on_offence( + &concurrent_offenders, + &slash_perbill, + session_index, + ) { + Ok(_) => true, + Err(_) => { + >::mutate(|d| + d.push((concurrent_offenders.to_vec(), slash_perbill.to_vec(), session_index)) + ); + false + } + } + } + /// Compute the ID for the given report properties. /// /// The report id depends on the offence kind, time slot and the id of offender. @@ -182,6 +249,11 @@ impl Module { None } } + + #[cfg(feature = "runtime-benchmarks")] + pub fn set_deferred_offences(offences: Vec>) { + >::put(offences); + } } struct TriageOutcome { diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index a003ad69157fcf5e8bf2f9c296aea3dd010e1b7d..e464200396ebde9be263ee75ea610813203d34c1 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -43,6 +43,7 @@ pub struct OnOffenceHandler; thread_local! 
{ pub static ON_OFFENCE_PERBILL: RefCell> = RefCell::new(Default::default()); + pub static CAN_REPORT: RefCell = RefCell::new(true); } impl offence::OnOffenceHandler for OnOffenceHandler { @@ -50,11 +51,25 @@ impl offence::OnOffenceHandler for OnOff _offenders: &[OffenceDetails], slash_fraction: &[Perbill], _offence_session: SessionIndex, - ) { - ON_OFFENCE_PERBILL.with(|f| { - *f.borrow_mut() = slash_fraction.to_vec(); - }); + ) -> Result<(), ()> { + if >::can_report() { + ON_OFFENCE_PERBILL.with(|f| { + *f.borrow_mut() = slash_fraction.to_vec(); + }); + + Ok(()) + } else { + Err(()) + } } + + fn can_report() -> bool { + CAN_REPORT.with(|c| *c.borrow()) + } +} + +pub fn set_can_report(can_report: bool) { + CAN_REPORT.with(|c| *c.borrow_mut() = can_report); } pub fn with_on_offence_fractions) -> R>(f: F) -> R { @@ -113,7 +128,9 @@ impl_outer_event! { pub fn new_test_ext() -> sp_io::TestExternalities { let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - t.into() + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext } /// Offences module. diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index 0ed98427c65f8eba4d02be1e679dc1e8db52d791..3179a0752318f1340ae924599734423b793fb012 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -21,9 +21,10 @@ use super::*; use crate::mock::{ Offences, System, Offence, TestEvent, KIND, new_test_ext, with_on_offence_fractions, - offence_reports, + offence_reports, set_can_report, }; use sp_runtime::Perbill; +use frame_support::traits::OnInitialize; use frame_system::{EventRecord, Phase}; #[test] @@ -129,8 +130,8 @@ fn should_deposit_event() { assert_eq!( System::events(), vec![EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode())), + phase: Phase::Initialization, + event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), topics: vec![], }] ); @@ -164,8 +165,8 @@ fn doesnt_deposit_event_for_dups() { assert_eq!( System::events(), vec![EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode())), + phase: Phase::Initialization, + event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), topics: vec![], }] ); @@ -212,3 +213,54 @@ fn should_properly_count_offences() { ); }); } + +#[test] +fn should_queue_and_resubmit_rejected_offence() { + new_test_ext().execute_with(|| { + set_can_report(false); + + // will get deferred + let offence = Offence { + validator_set_count: 5, + time_slot: 42, + offenders: vec![5], + }; + Offences::report_offence(vec![], offence).unwrap(); + assert_eq!(Offences::deferred_offences().len(), 1); + // event also indicates unapplied. 
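The mock's `CAN_REPORT` flag and the retry loop added to `on_initialize` above amount to a simple deferred queue: when the offence handler refuses a report (for example while an election window is open), the report is parked in `DeferredOffences` and re-submitted on later blocks, retaining only the entries that fail again. A standalone model of that retain-based retry, in plain Rust with stand-in types rather than the pallet's:

```rust
// (offenders, slash fractions, session index) stand in for DeferredOffenceOf<T>.
type Deferred = (Vec<u64>, Vec<u32>, u32);

// Stand-in for the offence handler, switched by a `can_report` flag like the mock.
fn on_offence(_d: &Deferred, can_report: bool) -> Result<(), ()> {
    if can_report { Ok(()) } else { Err(()) }
}

// Mirrors the shape of on_initialize: only touch the queue when the handler
// can accept reports, and keep the entries whose re-submission failed again.
fn retry_deferred(queue: &mut Vec<Deferred>, can_report: bool) {
    if can_report {
        queue.retain(|d| on_offence(d, can_report).is_err());
    }
}

fn main() {
    let mut queue = vec![
        (vec![5], vec![0], 0),
        (vec![5], vec![0], 1),
    ];
    retry_deferred(&mut queue, false); // handler closed: queue untouched
    assert_eq!(queue.len(), 2);
    retry_deferred(&mut queue, true);  // handler open again: everything drains
    assert!(queue.is_empty());
    println!("deferred queue drained");
}
```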
+ assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::offences(crate::Event::Offence(KIND, 42u128.encode(), false)), + topics: vec![], + }] + ); + + // will not dequeue + Offences::on_initialize(2); + + // again + let offence = Offence { + validator_set_count: 5, + time_slot: 62, + offenders: vec![5], + }; + Offences::report_offence(vec![], offence).unwrap(); + assert_eq!(Offences::deferred_offences().len(), 2); + + set_can_report(true); + + // can be submitted + let offence = Offence { + validator_set_count: 5, + time_slot: 72, + offenders: vec![5], + }; + Offences::report_offence(vec![], offence).unwrap(); + assert_eq!(Offences::deferred_offences().len(), 2); + + Offences::on_initialize(3); + assert_eq!(Offences::deferred_offences().len(), 0); + }) +} diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index 4e4ce76fee7edb7d39402be261c078925f63bb79..acd1c216884c036715f0f52b997cb0b78d5c6669 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-randomness-collective-flip" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,15 +10,15 @@ description = "FRAME randomness collective flip pallet" [dependencies] safe-mix = { version = "1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-io = { version = "2.0.0-alpha.5", path = "../../primitives/io" } [features] default = ["std"] @@ -30,3 +30,6 @@ std = [ "sp-runtime/std", "sp-std/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 0ded7dd6b0c64e5c932cf42d2f7478dea1ef4723..fdc465b4dc3bb0e9163bc750595a290138a7b24d 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -35,12 +35,13 @@ //! ### Example - Get random seed for the current block //! //! ``` -//! use frame_support::{decl_module, dispatch, traits::Randomness}; +//! use frame_support::{decl_module, dispatch, traits::Randomness, weights::SimpleDispatchInfo}; //! //! pub trait Trait: frame_system::Trait {} //! //! decl_module! { //! 
pub struct Module for enum Call where origin: T::Origin { +//! #[weight = SimpleDispatchInfo::default()] //! pub fn random_module_example(origin) -> dispatch::DispatchResult { //! let _random_seed = >::random_seed(); //! Ok(()) @@ -54,7 +55,10 @@ use sp_std::{prelude::*, convert::TryInto}; use sp_runtime::traits::Hash; -use frame_support::{decl_module, decl_storage, traits::Randomness}; +use frame_support::{ + decl_module, decl_storage, traits::Randomness, + weights::{Weight, SimpleDispatchInfo, WeighData} +}; use safe_mix::TripletMix; use codec::Encode; use frame_system::Trait; @@ -69,7 +73,7 @@ fn block_number_to_index(block_number: T::BlockNumber) -> usize { decl_module! { pub struct Module for enum Call where origin: T::Origin { - fn on_initialize(block_number: T::BlockNumber) { + fn on_initialize(block_number: T::BlockNumber) -> Weight { let parent_hash = >::parent_hash(); >::mutate(|ref mut values| if values.len() < RANDOM_MATERIAL_LEN as usize { @@ -78,6 +82,8 @@ decl_module! { let index = block_number_to_index::(block_number); values[index] = parent_hash; }); + + SimpleDispatchInfo::default().weigh_data(()) } } } @@ -156,9 +162,11 @@ mod tests { use sp_runtime::{ Perbill, testing::Header, - traits::{BlakeTwo256, OnInitialize, Header as _, IdentityLookup}, + traits::{BlakeTwo256, Header as _, IdentityLookup}, + }; + use frame_support::{ + impl_outer_origin, parameter_types, weights::Weight, traits::{Randomness, OnInitialize}, }; - use frame_support::{impl_outer_origin, parameter_types, weights::Weight, traits::Randomness}; #[derive(Clone, PartialEq, Eq)] pub struct Test; diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 80456aa375dd2e569300aac339642f5124930397..3347014f6e8cafc53a28626aad5124e35fa25b6a 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-recovery" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,17 +10,17 @@ description = "FRAME account recovery pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +sp-core = { version = 
"2.0.0-alpha.5", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } [features] default = ["std"] @@ -33,3 +33,6 @@ std = [ "frame-support/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 1b3c9416388f94315a811f5330bf797f4f927a8e..c055f2bd97c67777893d493feb4d51c3934048ed 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -161,6 +161,7 @@ use frame_support::{ decl_module, decl_event, decl_storage, decl_error, ensure, Parameter, RuntimeDebug, weights::{GetDispatchInfo, SimpleDispatchInfo, FunctionOf}, traits::{Currency, ReservableCurrency, Get, BalanceStatus}, + dispatch::PostDispatchInfo, }; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -178,7 +179,7 @@ pub trait Trait: frame_system::Trait { type Event: From> + Into<::Event>; /// The overarching call type. - type Call: Parameter + Dispatchable + GetDispatchInfo; + type Call: Parameter + Dispatchable + GetDispatchInfo; /// The currency mechanism. type Currency: ReservableCurrency; @@ -238,7 +239,7 @@ decl_storage! { trait Store for Module as Recovery { /// The set of recoverable accounts and their recovery configuration. pub Recoverable get(fn recovery_config): - map hasher(blake2_256) T::AccountId + map hasher(twox_64_concat) T::AccountId => Option, T::AccountId>>; /// Active recovery attempts. @@ -253,7 +254,7 @@ decl_storage! { /// /// Map from the user who can access it to the recovered account. pub Proxy get(fn proxy): - map hasher(blake2_256) T::AccountId => Option; + map hasher(blake2_128_concat) T::AccountId => Option; } } @@ -348,6 +349,7 @@ decl_module! { let target = Self::proxy(&who).ok_or(Error::::NotAllowed)?; ensure!(&target == &account, Error::::NotAllowed); call.dispatch(frame_system::RawOrigin::Signed(account).into()) + .map(|_| ()).map_err(|e| e.error) } /// Allow ROOT to bypass the recovery process and set an a rescuer account @@ -645,6 +647,7 @@ decl_module! { /// # /// - One storage mutation to check account is recovered by `who`. O(1) /// # + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn cancel_recovered(origin, account: T::AccountId) { let who = ensure_signed(origin)?; // Check `who` is allowed to make a call on behalf of `account` diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index a5b7731c2286a3c4af60ee3ec54f64060c0953a5..9327ece572212af48e739eac3725925592c9bc95 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -21,12 +21,13 @@ use super::*; use frame_support::{ impl_outer_origin, impl_outer_dispatch, impl_outer_event, parameter_types, weights::Weight, + traits::{OnInitialize, OnFinalize}, }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup, OnInitialize, OnFinalize}, testing::Header, + Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header, }; use crate as recovery; diff --git a/frame/recovery/src/tests.rs b/frame/recovery/src/tests.rs index 9c644291c906dcbf00ab0185d3e1065fc8d54fb1..fb993043a5b005f4e94976f99a9701af6ab83b79 100644 --- a/frame/recovery/src/tests.rs +++ b/frame/recovery/src/tests.rs @@ -240,7 +240,7 @@ fn initiate_recovery_works() { assert_eq!(Balances::reserved_balance(1), 10); // Recovery status object is created correctly let recovery_status = ActiveRecovery { - created: 1, + created: 0, deposit: 10, friends: vec![], }; @@ -288,7 +288,7 @@ fn vouch_recovery_works() { assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // Final recovery status object is updated correctly let recovery_status = ActiveRecovery { - created: 1, + created: 0, deposit: 10, friends: vec![2, 3, 4], }; diff --git a/frame/scheduler/Cargo.toml b/frame/scheduler/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..531de95867bbbaace5557f7b0e2d7e3d057088b8 --- /dev/null +++ b/frame/scheduler/Cargo.toml @@ -0,0 +1,35 @@ +[package] +name = "pallet-scheduler" +version = "2.0.0-alpha.5" +authors = ["Parity Technologies "] +edition = "2018" +license = "Unlicense" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME example pallet" + +[dependencies] +serde = { version = "1.0.101", optional = true } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +frame-benchmarking = { version = "2.0.0-alpha.4", default-features = false, path = "../benchmarking" } +frame-support = { version = "2.0.0-alpha.4", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.4", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-alpha.4", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.4", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.4", default-features = false, path = "../../primitives/io" } + +[dev-dependencies] +sp-core = { version = "2.0.0-alpha.4", path = "../../primitives/core", default-features = false } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "sp-runtime/std", + "frame-benchmarking/std", + "frame-support/std", + "frame-system/std", + "sp-io/std", + "sp-std/std" +] diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..f70204cb1a6b7201c89173cbe0dba6d5f10236ab --- /dev/null +++ b/frame/scheduler/src/lib.rs @@ -0,0 +1,522 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! # Scheduler +//! +//! \# Scheduler +//! +//! 
- \[`scheduler::Trait`](./trait.Trait.html) +//! - \[`Call`](./enum.Call.html) +//! - \[`Module`](./struct.Module.html) +//! +//! \## Overview +//! +//! // Short description of pallet's purpose. +//! // Links to Traits that should be implemented. +//! // What this pallet is for. +//! // What functionality the pallet provides. +//! // When to use the pallet (use case examples). +//! // How it is used. +//! // Inputs it uses and the source of each input. +//! // Outputs it produces. +//! +//! \## Terminology +//! +//! \## Goals +//! +//! \## Interface +//! +//! \### Dispatchable Functions + +// Ensure we're `no_std` when compiling for Wasm. +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_std::prelude::*; +use codec::{Encode, Decode}; +use sp_runtime::{RuntimeDebug, traits::{Zero, One}}; +use frame_support::{ + dispatch::{Dispatchable, DispatchResult, Parameter}, decl_module, decl_storage, decl_event, + traits::{Get, schedule}, + weights::{GetDispatchInfo, Weight}, +}; +use frame_system::{self as system}; + +/// Our pallet's configuration trait. All our types and constants go in here. If the +/// pallet is dependent on specific other pallets, then their configuration traits +/// should be added to our implied traits list. +/// +/// `system::Trait` should always be included in our implied traits. +pub trait Trait: system::Trait { + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// The aggregated origin which the dispatch will take. + type Origin: From>; + + /// The aggregated call type. + type Call: Parameter + Dispatchable::Origin> + GetDispatchInfo; + + /// The maximum weight that may be scheduled per block for any dispatchables of less priority + /// than `schedule::HARD_DEADLINE`. + type MaximumWeight: Get; +} + +/// Just a simple index for naming period tasks. +pub type PeriodicIndex = u32; +/// The location of a scheduled task that can be used to remove it. +pub type TaskAddress = (BlockNumber, u32); + +/// Information regarding an item to be executed in the future. +#[derive(Clone, RuntimeDebug, Encode, Decode)] +pub struct Scheduled { + /// The unique identity for this task, if there is one. + maybe_id: Option>, + /// This task's priority. + priority: schedule::Priority, + /// The call to be dispatched. + call: Call, + /// If the call is periodic, then this points to the information concerning that. + maybe_periodic: Option>, +} + +decl_storage! { + trait Store for Module as Scheduler { + /// Items to be executed, indexed by the block number that they should be executed on. + pub Agenda: map hasher(twox_64_concat) T::BlockNumber + => Vec::Call, T::BlockNumber>>>; + + /// Lookup from identity to the block number and index of the task. + Lookup: map hasher(twox_64_concat) Vec => Option>; + } +} + +decl_event!( + pub enum Event where ::BlockNumber { + Scheduled(BlockNumber), + Dispatched(TaskAddress, Option>, DispatchResult), + } +); + +decl_module! { + // Simple declaration of the `Module` type. Lets the macro know what its working on. 
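// A minimal standalone sketch of the data layout described above, using plain std
// collections as stand-ins for the `Agenda` and `Lookup` storage items: the agenda maps a
// block number to a vector of optional tasks, a task's address is `(block, index)` into
// that vector, and named tasks additionally get a lookup entry so they can be cancelled
// by id. `Task` and its fields are simplified illustrations, not the pallet's types.
use std::collections::HashMap;

type BlockNumber = u64;
type TaskAddress = (BlockNumber, u32);

#[allow(dead_code)]
#[derive(Clone, Debug)]
struct Task {
    maybe_id: Option<Vec<u8>>,
    priority: u8,
    call: &'static str,                         // stand-in for the runtime call
    maybe_periodic: Option<(BlockNumber, u32)>, // (period, remaining repetitions)
}

#[derive(Default)]
struct Agenda {
    by_block: HashMap<BlockNumber, Vec<Option<Task>>>,
    lookup: HashMap<Vec<u8>, TaskAddress>,
}

impl Agenda {
    fn schedule(&mut self, when: BlockNumber, task: Task) -> TaskAddress {
        let entry = self.by_block.entry(when).or_default();
        entry.push(Some(task.clone()));
        let address = (when, entry.len() as u32 - 1);
        if let Some(id) = task.maybe_id {
            self.lookup.insert(id, address);
        }
        address
    }

    fn cancel(&mut self, (when, index): TaskAddress) -> Result<(), ()> {
        let slot = self
            .by_block
            .get_mut(&when)
            .and_then(|v| v.get_mut(index as usize))
            .ok_or(())?;
        // Take the task out, leaving `None` so the addresses of other tasks stay valid.
        let task = slot.take().ok_or(())?;
        if let Some(id) = task.maybe_id {
            self.lookup.remove(&id);
        }
        Ok(())
    }
}

fn main() {
    let mut agenda = Agenda::default();
    let addr = agenda.schedule(4, Task {
        maybe_id: Some(b"my-task".to_vec()),
        priority: 127,
        call: "log(42)",
        maybe_periodic: Some((3, 2)),
    });
    assert_eq!(addr, (4, 0));
    assert!(agenda.cancel(addr).is_ok());
}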
+ pub struct Module for enum Call where origin: ::Origin { + fn deposit_event() = default; + + fn on_initialize(now: T::BlockNumber) -> Weight { + let limit = T::MaximumWeight::get(); + let mut queued = Agenda::::take(now).into_iter() + .enumerate() + .filter_map(|(index, s)| s.map(|inner| (index as u32, inner))) + .collect::>(); + queued.sort_by_key(|(_, s)| s.priority); + let mut result = 0; + let unused_items = queued.into_iter() + .enumerate() + .scan(0, |cumulative_weight, (order, (index, s))| { + *cumulative_weight += s.call.get_dispatch_info().weight; + Some((order, index, *cumulative_weight, s)) + }) + .filter_map(|(order, index, cumulative_weight, mut s)| { + if s.priority <= schedule::HARD_DEADLINE || cumulative_weight <= limit || order == 0 { + let r = s.call.clone().dispatch(system::RawOrigin::Root.into()); + let maybe_id = s.maybe_id.clone(); + if let &Some((period, count)) = &s.maybe_periodic { + if count > 1 { + s.maybe_periodic = Some((period, count - 1)); + } else { + s.maybe_periodic = None; + } + let next = now + period; + if let Some(ref id) = s.maybe_id { + let next_index = Agenda::::decode_len(now + period).unwrap_or(0) as u32; + Lookup::::insert(id, (next, next_index)); + } + Agenda::::append_or_insert(next, &[Some(s)][..]); + } else { + if let Some(ref id) = s.maybe_id { + Lookup::::remove(id); + } + } + Self::deposit_event(RawEvent::Dispatched( + (now, index), + maybe_id, + r.map(|_| ()).map_err(|e| e.error) + )); + result = cumulative_weight; + None + } else { + Some(Some(s)) + } + }) + .collect::>(); + if !unused_items.is_empty() { + let next = now + One::one(); + Agenda::::append_or_insert(next, &unused_items[..]); + } + result + } + } +} + +impl schedule::Anon::Call> for Module { + type Address = TaskAddress; + + fn schedule( + when: T::BlockNumber, + maybe_periodic: Option>, + priority: schedule::Priority, + call: ::Call + ) -> Self::Address { + // sanitize maybe_periodic + let maybe_periodic = maybe_periodic + .filter(|p| p.1 > 1 && !p.0.is_zero()) + // Remove one from the number of repetitions since we will schedule one now. + .map(|(p, c)| (p, c - 1)); + let s = Some(Scheduled { maybe_id: None, priority, call, maybe_periodic }); + Agenda::::append_or_insert(when, &[s][..]); + (when, Agenda::::decode_len(when).unwrap_or(1) as u32 - 1) + } + + fn cancel((when, index): Self::Address) -> Result<(), ()> { + if let Some(s) = Agenda::::mutate(when, |agenda| agenda.get_mut(index as usize).and_then(Option::take)) { + if let Some(id) = s.maybe_id { + Lookup::::remove(id) + } + Ok(()) + } else { + Err(()) + } + } +} + +impl schedule::Named::Call> for Module { + type Address = TaskAddress; + + fn schedule_named( + id: impl Encode, + when: T::BlockNumber, + maybe_periodic: Option>, + priority: schedule::Priority, + call: ::Call, + ) -> Result { + // determine id and ensure it is unique + let id = id.encode(); + if Lookup::::contains_key(&id) { + return Err(()) + } + + // sanitize maybe_periodic + let maybe_periodic = maybe_periodic + .filter(|p| p.1 > 1 && !p.0.is_zero()) + // Remove one from the number of repetitions since we will schedule one now. 
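// A minimal sketch of the selection rule implemented by `on_initialize` above: queued
// tasks are sorted by priority, then run in order while the accumulated weight stays
// within the block limit; tasks whose priority value is at or below the hard-deadline
// threshold, and the very first task, run even past the limit, and everything else is
// deferred to the next block. The concrete numbers and the `HARD_DEADLINE` value here are
// stand-ins for illustration; the `main` example mirrors the soft-deadline test below.

const HARD_DEADLINE: u8 = 63;

#[derive(Clone, Debug)]
struct Item { priority: u8, weight: u64, name: &'static str }

/// Returns (executed, deferred, weight reported back), mirroring `result` above.
fn process(mut queued: Vec<Item>, limit: u64) -> (Vec<Item>, Vec<Item>, u64) {
    queued.sort_by_key(|i| i.priority);
    let mut executed = Vec::new();
    let mut deferred = Vec::new();
    let mut cumulative = 0u64;
    let mut reported = 0u64;
    for (order, item) in queued.into_iter().enumerate() {
        // Weight accumulates over every considered item, deferred or not, as in the
        // `scan` step above.
        cumulative += item.weight;
        if item.priority <= HARD_DEADLINE || cumulative <= limit || order == 0 {
            reported = cumulative;
            executed.push(item);
        } else {
            deferred.push(item);
        }
    }
    (executed, deferred, reported)
}

fn main() {
    // With a limit of 10_000, only the highest-priority (lowest value) task fits.
    let items = vec![
        Item { priority: 255, weight: 5_000, name: "42" },
        Item { priority: 127, weight: 5_000, name: "69" },
        Item { priority: 126, weight: 6_000, name: "2600" },
    ];
    let (executed, deferred, reported) = process(items, 10_000);
    assert_eq!(executed.iter().map(|i| i.name).collect::<Vec<_>>(), vec!["2600"]);
    assert_eq!(deferred.len(), 2);
    assert_eq!(reported, 6_000);
}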
+ .map(|(p, c)| (p, c - 1)); + + let s = Scheduled { maybe_id: Some(id.clone()), priority, call, maybe_periodic }; + Agenda::::append_or_insert(when, &[Some(s)][..]); + let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; + let address = (when, index); + Lookup::::insert(&id, &address); + Ok(address) + } + + fn cancel_named(id: impl Encode) -> Result<(), ()> { + if let Some((when, index)) = id.using_encoded(|d| Lookup::::take(d)) { + let i = index as usize; + Agenda::::mutate(when, |agenda| if let Some(s) = agenda.get_mut(i) { *s = None }); + Ok(()) + } else { + Err(()) + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use frame_support::{ + impl_outer_event, impl_outer_origin, impl_outer_dispatch, parameter_types, assert_ok, + traits::{OnInitialize, OnFinalize, schedule::{Anon, Named}}, + weights::{DispatchClass, FunctionOf} + }; + use sp_core::H256; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. + use sp_runtime::{ + Perbill, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + }; + use crate as scheduler; + + mod logger { + use super::*; + use std::cell::RefCell; + use frame_system::ensure_root; + + thread_local! { + static LOG: RefCell> = RefCell::new(Vec::new()); + } + pub fn log() -> Vec { + LOG.with(|log| log.borrow().clone()) + } + pub trait Trait: system::Trait { + type Event: From + Into<::Event>; + } + decl_storage! { + trait Store for Module as Logger { + } + } + decl_event! { + pub enum Event { + Logged(u32, Weight), + } + } + decl_module! { + // Simple declaration of the `Module` type. Lets the macro know what its working on. + pub struct Module for enum Call where origin: ::Origin { + fn deposit_event() = default; + + #[weight = FunctionOf( + |args: (&u32, &Weight)| *args.1, + |_: (&u32, &Weight)| DispatchClass::Normal, + true + )] + fn log(origin, i: u32, weight: Weight) { + ensure_root(origin)?; + Self::deposit_event(Event::Logged(i, weight)); + LOG.with(|log| { + log.borrow_mut().push(i); + }) + } + } + } + } + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + impl_outer_dispatch! { + pub enum Call for Test where origin: Origin { + system::System, + logger::Logger, + } + } + + impl_outer_event! { + pub enum Event for Test { + system, + logger, + scheduler, + } + } + // For testing the pallet, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of pallets we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl system::Trait for Test { + type Origin = Origin; + type Call = (); + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + impl logger::Trait for Test { + type Event = (); + } + parameter_types! { + pub const MaximumWeight: Weight = 10_000; + } + impl Trait for Test { + type Event = (); + type Origin = Origin; + type Call = Call; + type MaximumWeight = MaximumWeight; + } + type System = system::Module; + type Logger = logger::Module; + type Scheduler = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. + fn new_test_ext() -> sp_io::TestExternalities { + let t = system::GenesisConfig::default().build_storage::().unwrap(); + t.into() + } + + fn run_to_block(n: u64) { + while System::block_number() < n { + Scheduler::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + Scheduler::on_initialize(System::block_number()); + } + } + + #[test] + fn basic_scheduling_works() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))); + run_to_block(3); + assert!(logger::log().is_empty()); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(100); + assert_eq!(logger::log(), vec![42u32]); + }); + } + + #[test] + fn periodic_scheduling_works() { + new_test_ext().execute_with(|| { + // at #4, every 3 blocks, 3 times. + Scheduler::schedule(4, Some((3, 3)), 127, Call::Logger(logger::Call::log(42, 1000))); + run_to_block(3); + assert!(logger::log().is_empty()); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(6); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(7); + assert_eq!(logger::log(), vec![42u32, 42u32]); + run_to_block(9); + assert_eq!(logger::log(), vec![42u32, 42u32]); + run_to_block(10); + assert_eq!(logger::log(), vec![42u32, 42u32, 42u32]); + run_to_block(100); + assert_eq!(logger::log(), vec![42u32, 42u32, 42u32]); + }); + } + + #[test] + fn cancel_named_scheduling_works_with_normal_cancel() { + new_test_ext().execute_with(|| { + // at #4. + Scheduler::schedule_named(1u32, 4, None, 127, Call::Logger(logger::Call::log(69, 1000))).unwrap(); + let i = Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))); + run_to_block(3); + assert!(logger::log().is_empty()); + assert_ok!(Scheduler::cancel_named(1u32)); + assert_ok!(Scheduler::cancel(i)); + run_to_block(100); + assert!(logger::log().is_empty()); + }); + } + + #[test] + fn cancel_named_periodic_scheduling_works() { + new_test_ext().execute_with(|| { + // at #4, every 3 blocks, 3 times. + Scheduler::schedule_named(1u32, 4, Some((3, 3)), 127, Call::Logger(logger::Call::log(42, 1000))).unwrap(); + // same id results in error. 
+ assert!(Scheduler::schedule_named(1u32, 4, None, 127, Call::Logger(logger::Call::log(69, 1000))).is_err()); + // different id is ok. + Scheduler::schedule_named(2u32, 8, None, 127, Call::Logger(logger::Call::log(69, 1000))).unwrap(); + run_to_block(3); + assert!(logger::log().is_empty()); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(6); + assert_ok!(Scheduler::cancel_named(1u32)); + run_to_block(100); + assert_eq!(logger::log(), vec![42u32, 69u32]); + }); + } + + #[test] + fn scheduler_respects_weight_limits() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(42, 6000))); + Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(69, 6000))); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(5); + assert_eq!(logger::log(), vec![42u32, 69u32]); + }); + } + + #[test] + fn scheduler_respects_hard_deadlines_more() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 0, Call::Logger(logger::Call::log(42, 6000))); + Scheduler::schedule(4, None, 0, Call::Logger(logger::Call::log(69, 6000))); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32, 69u32]); + }); + } + + #[test] + fn scheduler_respects_priority_ordering() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 1, Call::Logger(logger::Call::log(42, 6000))); + Scheduler::schedule(4, None, 0, Call::Logger(logger::Call::log(69, 6000))); + run_to_block(4); + assert_eq!(logger::log(), vec![69u32, 42u32]); + }); + } + + #[test] + fn scheduler_respects_priority_ordering_with_soft_deadlines() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 255, Call::Logger(logger::Call::log(42, 5000))); + Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(69, 5000))); + Scheduler::schedule(4, None, 126, Call::Logger(logger::Call::log(2600, 6000))); + run_to_block(4); + assert_eq!(logger::log(), vec![2600u32]); + run_to_block(5); + assert_eq!(logger::log(), vec![2600u32, 69u32, 42u32]); + }); + } + + #[test] + fn initialize_weight_is_correct() { + new_test_ext().execute_with(|| { + Scheduler::schedule(1, None, 255, Call::Logger(logger::Call::log(3, 1000))); + Scheduler::schedule(1, None, 128, Call::Logger(logger::Call::log(42, 5000))); + Scheduler::schedule(1, None, 127, Call::Logger(logger::Call::log(69, 5000))); + Scheduler::schedule(1, None, 126, Call::Logger(logger::Call::log(2600, 6000))); + let weight = Scheduler::on_initialize(1); + assert_eq!(weight, 6000); + let weight = Scheduler::on_initialize(2); + assert_eq!(weight, 10000); + let weight = Scheduler::on_initialize(3); + assert_eq!(weight, 1000); + let weight = Scheduler::on_initialize(4); + assert_eq!(weight, 0); + }); + } +} diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index a1b7b39d8cd9ffc2b548522168b0599ca5958678..b878c5bb47559d3d8eab8cb842d8eb00182abed7 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-scored-pool" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,17 +9,17 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet for scored pools" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } serde = { version = 
"1.0.101", optional = true } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } [features] default = ["std"] @@ -32,3 +32,6 @@ std = [ "frame-support/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index e3854c752441cf1501194f411bbf59c23c5e419b..2602d389626a702a2fea4e156e84e3fd52ad9d3e 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -61,6 +61,7 @@ //! //! decl_module! { //! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = frame_support::weights::SimpleDispatchInfo::default()] //! pub fn candidate(origin) -> dispatch::DispatchResult { //! let who = ensure_signed(origin)?; //! @@ -95,11 +96,12 @@ use sp_std::{ }; use frame_support::{ decl_module, decl_storage, decl_event, ensure, decl_error, - traits::{ChangeMembers, InitializeMembers, Currency, Get, ReservableCurrency}, + traits::{EnsureOrigin, ChangeMembers, InitializeMembers, Currency, Get, ReservableCurrency}, + weights::{Weight, SimpleDispatchInfo, WeighData}, }; use frame_system::{self as system, ensure_root, ensure_signed}; use sp_runtime::{ - traits::{EnsureOrigin, AtLeast32Bit, MaybeSerializeDeserialize, Zero, StaticLookup}, + traits::{AtLeast32Bit, MaybeSerializeDeserialize, Zero, StaticLookup}, }; type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; @@ -165,7 +167,7 @@ decl_storage! { /// check if a candidate is already in the pool, without having to /// iterate over the entire pool (the `Pool` is not sorted by /// `T::AccountId`, but by `T::Score` instead). - CandidateExists get(fn candidate_exists): map hasher(blake2_256) T::AccountId => bool; + CandidateExists get(fn candidate_exists): map hasher(twox_64_concat) T::AccountId => bool; /// The current membership, stored as an ordered Vec. Members get(fn members): Vec; @@ -189,8 +191,8 @@ decl_storage! { >::insert(who, true); }); - /// Sorts the `Pool` by score in a descending order. Entities which - /// have a score of `None` are sorted to the beginning of the vec. + // Sorts the `Pool` by score in a descending order. Entities which + // have a score of `None` are sorted to the beginning of the vec. 
pool.sort_by_key(|(_, maybe_score)| Reverse(maybe_score.unwrap_or_default()) ); @@ -245,11 +247,12 @@ decl_module! { /// Every `Period` blocks the `Members` set is refreshed from the /// highest scoring members in the pool. - fn on_initialize(n: T::BlockNumber) { + fn on_initialize(n: T::BlockNumber) -> Weight { if n % T::Period::get() == Zero::zero() { let pool = >::get(); >::refresh_members(pool, ChangeReceiver::MembershipChanged); } + SimpleDispatchInfo::default().weigh_data(()) } /// Add `origin` to the pool of candidates. @@ -263,6 +266,7 @@ decl_module! { /// /// The `index` parameter of this function must be set to /// the index of the transactor in the `Pool`. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn submit_candidacy(origin) { let who = ensure_signed(origin)?; ensure!(!>::contains_key(&who), Error::::AlreadyInPool); @@ -292,6 +296,7 @@ decl_module! { /// /// The `index` parameter of this function must be set to /// the index of the transactor in the `Pool`. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn withdraw_candidacy( origin, index: u32 @@ -311,6 +316,7 @@ decl_module! { /// /// The `index` parameter of this function must be set to /// the index of `dest` in the `Pool`. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn kick( origin, dest: ::Source, @@ -335,6 +341,7 @@ decl_module! { /// /// The `index` parameter of this function must be set to /// the index of the `dest` in the `Pool`. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn score( origin, dest: ::Source, @@ -375,6 +382,7 @@ decl_module! { /// (this happens each `Period`). /// /// May only be called from root. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn change_member_count(origin, count: u32) { ensure_root(origin)?; >::put(&count); diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs index 4b21339505ded6b55d9a520595af2df1c8277fca..8d87a20f757b223443b2b134fb1d66f0ab793b53 100644 --- a/frame/scored-pool/src/tests.rs +++ b/frame/scored-pool/src/tests.rs @@ -19,8 +19,8 @@ use super::*; use mock::*; -use frame_support::{assert_ok, assert_noop}; -use sp_runtime::traits::{OnInitialize, BadOrigin}; +use frame_support::{assert_ok, assert_noop, traits::OnInitialize}; +use sp_runtime::traits::BadOrigin; type ScoredPool = Module; type System = frame_system::Module; diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 74ca9fe67bb41968927f5050a7db90466c2bc6e4..f12a8b4a71f2fc8df7f81ac50098c32ffd9cd7aa 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-session" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,20 +10,20 @@ description = "FRAME sessions pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = 
"../system" } -pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } -sp-trie = { optional = true, path = "../../primitives/trie", default-features = false , version = "2.0.0-alpha.2"} -sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../timestamp" } +sp-trie = { optional = true, path = "../../primitives/trie", default-features = false, version = "2.0.0-alpha.5"} +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.5"} impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-application-crypto = { version = "2.0.0-alpha.5", path = "../../primitives/application-crypto" } lazy_static = "1.4.0" [features] @@ -40,3 +40,6 @@ std = [ "sp-trie/std", "sp-io/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/session/benchmarking/Cargo.toml b/frame/session/benchmarking/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..140116c82c640568f8683b0ed9309ab51088d672 --- /dev/null +++ b/frame/session/benchmarking/Cargo.toml @@ -0,0 +1,42 @@ +[package] +name = "pallet-session-benchmarking" +version = "2.0.0-alpha.5" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME sessions pallet benchmarking" + +[dependencies] +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/runtime" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../../system" } +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../../benchmarking" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../../support" } +pallet-staking = { version = "2.0.0-alpha.5", default-features = false, features = ["runtime-benchmarks"], path = "../../staking" } +pallet-session = { version = "2.0.0-alpha.5", default-features = false, path = "../../session" } + +[dev-dependencies] +serde = { version = "1.0.101" } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +pallet-staking-reward-curve = { version = "2.0.0-alpha.5", path = "../../staking/reward-curve" } +sp-io ={ path = "../../../primitives/io", version = "2.0.0-alpha.5"} +pallet-timestamp = { version = 
"2.0.0-alpha.5", path = "../../timestamp" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../../balances" } + +[features] +default = ["std"] +std = [ + "sp-std/std", + "sp-runtime/std", + "frame-system/std", + "frame-benchmarking/std", + "frame-support/std", + "pallet-staking/std", + "pallet-session/std", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..3b91c2fdc5194f56086b139eb33a71efc468e050 --- /dev/null +++ b/frame/session/benchmarking/src/lib.rs @@ -0,0 +1,76 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Benchmarks for the Session Pallet. +// This is separated into its own crate due to cyclic dependency issues. + +#![cfg_attr(not(feature = "std"), no_std)] + +mod mock; + +use sp_std::prelude::*; +use sp_std::vec; + +use frame_system::RawOrigin; +use frame_benchmarking::benchmarks; + +use pallet_session::*; +use pallet_session::Module as Session; + +use pallet_staking::{ + MAX_NOMINATIONS, + benchmarking::create_validator_with_nominators, +}; + +pub struct Module(pallet_session::Module); + +pub trait Trait: pallet_session::Trait + pallet_staking::Trait {} + +benchmarks! { + _ { } + + set_keys { + let n in 1 .. MAX_NOMINATIONS as u32; + let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; + let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; + let keys = T::Keys::default(); + let proof: Vec = vec![0,1,2,3]; + }: _(RawOrigin::Signed(v_controller), keys, proof) + + purge_keys { + let n in 1 .. MAX_NOMINATIONS as u32; + let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; + let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; + let keys = T::Keys::default(); + let proof: Vec = vec![0,1,2,3]; + Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; + }: _(RawOrigin::Signed(v_controller)) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set_keys::()); + assert_ok!(test_benchmark_purge_keys::()); + }); + } +} diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..ff7965efcacebbe6e7dd64a5999f9fcf196664cb --- /dev/null +++ b/frame/session/benchmarking/src/mock.rs @@ -0,0 +1,186 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Mock file for staking fuzzing. + +#![cfg(test)] + +use sp_runtime::traits::{Convert, SaturatedConversion, IdentityLookup}; +use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types}; + +type AccountId = u64; +type AccountIndex = u32; +type BlockNumber = u64; +type Balance = u64; + +type System = frame_system::Module; +type Balances = pallet_balances::Module; +type Staking = pallet_staking::Module; +type Session = pallet_session::Module; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +impl_outer_dispatch! { + pub enum Call for Test where origin: Origin { + pallet_staking::Staking, + } +} + +pub struct CurrencyToVoteHandler; +impl Convert for CurrencyToVoteHandler { + fn convert(x: u64) -> u64 { + x + } +} +impl Convert for CurrencyToVoteHandler { + fn convert(x: u128) -> u64 { + x.saturated_into() + } +} + +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct Test; + +impl frame_system::Trait for Test { + type Origin = Origin; + type Index = AccountIndex; + type BlockNumber = BlockNumber; + type Call = Call; + type Hash = sp_core::H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = sp_runtime::testing::Header; + type Event = (); + type BlockHashCount = (); + type MaximumBlockWeight = (); + type AvailableBlockRatio = (); + type MaximumBlockLength = (); + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (Balances,); +} +parameter_types! { + pub const ExistentialDeposit: Balance = 10; +} +impl pallet_balances::Trait for Test { + type Balance = Balance; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} + +parameter_types! { + pub const MinimumPeriod: u64 = 5; +} +impl pallet_timestamp::Trait for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; +} +impl pallet_session::historical::Trait for Test { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; +} + +sp_runtime::impl_opaque_keys! 
{ + pub struct SessionKeys { + pub foo: sp_runtime::testing::UintAuthorityId, + } +} + +pub struct TestSessionHandler; +impl pallet_session::SessionHandler for TestSessionHandler { + const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + + fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} + + fn on_new_session( + _: bool, + _: &[(AccountId, Ks)], + _: &[(AccountId, Ks)], + ) {} + + fn on_disabled(_: usize) {} +} + +impl pallet_session::Trait for Test { + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; + type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; + type SessionHandler = TestSessionHandler; + type Event = (); + type ValidatorId = AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type DisabledValidatorsThreshold = (); +} +pallet_staking_reward_curve::build! { + const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} +parameter_types! { + pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; + pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const UnsignedPriority: u64 = 1 << 20; +} + +pub type Extrinsic = sp_runtime::testing::TestXt; +type SubmitTransaction = frame_system::offchain::TransactionSubmitter< + sp_runtime::testing::UintAuthorityId, + Test, + Extrinsic, +>; + +impl pallet_staking::Trait for Test { + type Currency = Balances; + type UnixTime = pallet_timestamp::Module; + type CurrencyToVote = CurrencyToVoteHandler; + type RewardRemainder = (); + type Event = (); + type Slash = (); + type Reward = (); + type SessionsPerEra = (); + type SlashDeferDuration = (); + type SlashCancelOrigin = frame_system::EnsureRoot; + type BondingDuration = (); + type SessionInterface = Self; + type RewardCurve = RewardCurve; + type NextNewSession = Session; + type ElectionLookahead = (); + type Call = Call; + type SubmitTransaction = SubmitTransaction; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type UnsignedPriority = UnsignedPriority; +} + +impl crate::Trait for Test {} + +pub fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + sp_io::TestExternalities::new(t) +} diff --git a/frame/session/src/historical.rs b/frame/session/src/historical.rs index 91719c27f27ccc59ab7ad1956a0ae1afcb7714c8..f9990dd1e8a7a716f4a557467fe5364602c45fe3 100644 --- a/frame/session/src/historical.rs +++ b/frame/session/src/historical.rs @@ -56,22 +56,18 @@ decl_storage! { trait Store for Module as Session { /// Mapping from historical session indices to session-data root hash and validator count. HistoricalSessions get(fn historical_root): - map hasher(blake2_256) SessionIndex => Option<(T::Hash, ValidatorCount)>; + map hasher(twox_64_concat) SessionIndex => Option<(T::Hash, ValidatorCount)>; /// The range of historical sessions we store. [first, last) StoredRange: Option<(SessionIndex, SessionIndex)>; /// Deprecated. CachedObsolete: - map hasher(blake2_256) SessionIndex + map hasher(twox_64_concat) SessionIndex => Option>; } } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { - fn on_initialize(_n: T::BlockNumber) { - CachedObsolete::::remove_all(); - } - } + pub struct Module for enum Call where origin: T::Origin {} } impl Module { @@ -314,12 +310,12 @@ impl> frame_support::traits::KeyOwnerProofSystem<(KeyTy mod tests { use super::*; use sp_core::crypto::key_types::DUMMY; - use sp_runtime::{traits::OnInitialize, testing::UintAuthorityId}; + use sp_runtime::testing::UintAuthorityId; use crate::mock::{ NEXT_VALIDATORS, force_new_session, set_next_validators, Test, System, Session, }; - use frame_support::traits::KeyOwnerProofSystem; + use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; type Historical = Module; diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 226003d93345069fbe216e66b9b9c2b613376b70..9346b060fa48a8afd22d01dc1d88d6a6b743a8e4 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -102,16 +102,22 @@ use sp_std::{prelude::*, marker::PhantomData, ops::{Sub, Rem}}; use codec::Decode; use sp_runtime::{KeyTypeId, Perbill, RuntimeAppPublic, BoundToRuntimeAppPublic}; -use frame_support::weights::SimpleDispatchInfo; -use sp_runtime::traits::{Convert, Zero, Member, OpaqueKeys}; +use sp_runtime::traits::{Convert, Zero, Member, OpaqueKeys, Saturating}; use sp_staking::SessionIndex; -use frame_support::{ensure, decl_module, decl_event, decl_storage, decl_error, ConsensusEngineId}; -use frame_support::{traits::{Get, FindAuthor, ValidatorRegistration}, Parameter}; -use frame_support::dispatch::{self, DispatchResult, DispatchError}; +use frame_support::{ + ensure, decl_module, decl_event, decl_storage, decl_error, ConsensusEngineId, Parameter, + traits::{ + Get, FindAuthor, ValidatorRegistration, EstimateNextSessionRotation, EstimateNextNewSession, + }, + dispatch::{self, DispatchResult, DispatchError}, + weights::{Weight, SimpleDispatchInfo, WeighData}, +}; use frame_system::{self as system, ensure_signed}; #[cfg(test)] mod mock; +#[cfg(test)] +mod tests; #[cfg(feature = "historical")] pub mod historical; @@ -143,6 +149,29 @@ impl< } } +impl< + BlockNumber: Rem + Sub + Zero + PartialOrd + Saturating + Clone, + Period: Get, + Offset: Get, +> EstimateNextSessionRotation for PeriodicSessions { + fn estimate_next_session_rotation(now: BlockNumber) -> Option { + let offset = Offset::get(); + let period = Period::get(); + Some(if now > offset { + let block_after_last_session = (now.clone() - offset) % period.clone(); + if block_after_last_session > Zero::zero() { + now.saturating_add( + period.saturating_sub(block_after_last_session) + ) + } else { + Zero::zero() + } + } else { + offset + }) + } +} + /// A trait for managing creation of new validator set. pub trait SessionManager { /// Plan a new session, and optionally provide the new validator set. @@ -326,6 +355,11 @@ pub trait Trait: frame_system::Trait { /// Indicator for when to end the session. type ShouldEndSession: ShouldEndSession; + /// Something that can predict the next session rotation. This should typically come from the + /// same logical unit that provides [`ShouldEndSession`], yet, it gives a best effort estimate. + /// It is helpful to implement [`EstimateNextNewSession`]. + type NextSessionRotation: EstimateNextSessionRotation; + /// Handler for managing new session. type SessionManager: SessionManager; @@ -364,10 +398,10 @@ decl_storage! { DisabledValidators get(fn disabled_validators): Vec; /// The next session keys for a validator. 
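// A minimal sketch of the `estimate_next_session_rotation` arithmetic added for
// `PeriodicSessions<Period, Offset>` above, using plain `u64` block numbers: before the
// first rotation the estimate is `offset`; afterwards it is the next multiple of `period`
// past `offset`, except exactly on a rotation block, where the implementation above
// returns zero.
fn estimate_next_session_rotation(now: u64, period: u64, offset: u64) -> Option<u64> {
    Some(if now > offset {
        let block_after_last_session = (now - offset) % period;
        if block_after_last_session > 0 {
            // Finish out the current period.
            now + (period - block_after_last_session)
        } else {
            // `now` is itself a rotation block.
            0
        }
    } else {
        offset
    })
}

fn main() {
    // Period = 10, Offset = 3, matching `periodic_session_works`: sessions end at 3, 13, ...
    assert_eq!(estimate_next_session_rotation(1, 10, 3), Some(3));
    assert_eq!(estimate_next_session_rotation(4, 10, 3), Some(13));
    assert_eq!(estimate_next_session_rotation(12, 10, 3), Some(13));
    assert_eq!(estimate_next_session_rotation(13, 10, 3), Some(0));
}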
- NextKeys: map hasher(blake2_256) T::ValidatorId => Option; + NextKeys: map hasher(twox_64_concat) T::ValidatorId => Option; /// The owner of a key. The key is the `KeyTypeId` + the encoded key. - KeyOwner: map hasher(blake2_256) (KeyTypeId, Vec) => Option; + KeyOwner: map hasher(twox_64_concat) (KeyTypeId, Vec) => Option; } add_extra_genesis { config(keys): Vec<(T::AccountId, T::ValidatorId, T::Keys)>; @@ -465,7 +499,7 @@ decl_module! { /// In this case, purge_keys will need to be called before the account can be removed. /// # #[weight = SimpleDispatchInfo::FixedNormal(150_000)] - fn set_keys(origin, keys: T::Keys, proof: Vec) -> dispatch::DispatchResult { + pub fn set_keys(origin, keys: T::Keys, proof: Vec) -> dispatch::DispatchResult { let who = ensure_signed(origin)?; ensure!(keys.ownership_proof_is_valid(&proof), Error::::InvalidProof); @@ -486,48 +520,24 @@ decl_module! { /// - Reduces system account refs by one on success. /// # #[weight = SimpleDispatchInfo::FixedNormal(150_000)] - fn purge_keys(origin) { + pub fn purge_keys(origin) { let who = ensure_signed(origin)?; Self::do_purge_keys(&who)?; } /// Called when a block is initialized. Will rotate session if it is the last /// block of the current session. - fn on_initialize(n: T::BlockNumber) { + fn on_initialize(n: T::BlockNumber) -> Weight { if T::ShouldEndSession::should_end_session(n) { Self::rotate_session(); } - } - /// Called when the runtime is upgraded. - fn on_runtime_upgrade() { - Self::migrate(); + SimpleDispatchInfo::default().weigh_data(()) } } } impl Module { - /// Move keys from NextKeys and KeyOwner, if any exist. - fn migrate() { - use frame_support::storage::migration::{put_storage_value, StorageIterator}; - sp_runtime::print("Migrating session's double-maps..."); - - let prefix = { - const DEDUP_KEY_PREFIX: &[u8] = b":session:keys"; - let encoded_prefix_key_hash = codec::Encode::encode(&DEDUP_KEY_PREFIX); - let mut h = sp_io::hashing::twox_64(&encoded_prefix_key_hash[..]).to_vec(); - h.extend(&encoded_prefix_key_hash[..]); - h - }; - - for (hash, value) in StorageIterator::::with_suffix(b"Session", b"NextKeys", &prefix[..]).drain() { - put_storage_value(b"Session", b"NextKeys", &hash, value); - } - for (hash, value) in StorageIterator::::with_suffix(b"Session", b"KeyOwner", &prefix[..]).drain() { - put_storage_value(b"Session", b"KeyOwner", &hash, value); - } - } - /// Move on to next session. Register new validator set and session keys. Changes /// to the validator set have a session of delay to take effect. This allows for /// equivocation punishment after a fork. 
@@ -756,307 +766,10 @@ impl> FindAuthor } } -#[cfg(test)] -mod tests { - use super::*; - use frame_support::assert_ok; - use sp_core::crypto::key_types::DUMMY; - use sp_runtime::{traits::OnInitialize, testing::UintAuthorityId}; - use mock::{ - NEXT_VALIDATORS, SESSION_CHANGED, TEST_SESSION_CHANGED, authorities, force_new_session, - set_next_validators, set_session_length, session_changed, Test, Origin, System, Session, - reset_before_session_end_called, before_session_end_called, - }; - - fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { - keys: NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ), - }.assimilate_storage(&mut t).unwrap(); - sp_io::TestExternalities::new(t) - } - - fn initialize_block(block: u64) { - SESSION_CHANGED.with(|l| *l.borrow_mut() = false); - System::set_block_number(block); - Session::on_initialize(block); - } - - #[test] - fn simple_setup_should_work() { - new_test_ext().execute_with(|| { - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - assert_eq!(Session::validators(), vec![1, 2, 3]); - }); - } - - #[test] - fn put_get_keys() { - new_test_ext().execute_with(|| { - Session::put_keys(&10, &UintAuthorityId(10).into()); - assert_eq!(Session::load_keys(&10), Some(UintAuthorityId(10).into())); - }) - } - - #[test] - fn keys_cleared_on_kill() { - let mut ext = new_test_ext(); - ext.execute_with(|| { - assert_eq!(Session::validators(), vec![1, 2, 3]); - assert_eq!(Session::load_keys(&1), Some(UintAuthorityId(1).into())); - - let id = DUMMY; - assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); - - assert!(!System::allow_death(&1)); - assert_ok!(Session::purge_keys(Origin::signed(1))); - assert!(System::allow_death(&1)); - - assert_eq!(Session::load_keys(&1), None); - assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), None); - }) - } - - #[test] - fn authorities_should_track_validators() { - reset_before_session_end_called(); - - new_test_ext().execute_with(|| { - set_next_validators(vec![1, 2]); - force_new_session(); - initialize_block(1); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - ]); - assert_eq!(Session::validators(), vec![1, 2, 3]); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - force_new_session(); - initialize_block(2); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - ]); - assert_eq!(Session::validators(), vec![1, 2]); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - set_next_validators(vec![1, 2, 4]); - assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(4).into(), vec![])); - force_new_session(); - initialize_block(3); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - (4, UintAuthorityId(4).into()), - ]); - assert_eq!(Session::validators(), vec![1, 2]); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); - assert!(before_session_end_called()); - - force_new_session(); - initialize_block(4); - assert_eq!(Session::queued_keys(), vec![ - (1, 
UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - (4, UintAuthorityId(4).into()), - ]); - assert_eq!(Session::validators(), vec![1, 2, 4]); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(4)]); - }); - } - - #[test] - fn should_work_with_early_exit() { - new_test_ext().execute_with(|| { - set_session_length(10); - - initialize_block(1); - assert_eq!(Session::current_index(), 0); - - initialize_block(2); - assert_eq!(Session::current_index(), 0); - - force_new_session(); - initialize_block(3); - assert_eq!(Session::current_index(), 1); - - initialize_block(9); - assert_eq!(Session::current_index(), 1); - - initialize_block(10); - assert_eq!(Session::current_index(), 2); - }); - } - - #[test] - fn session_change_should_work() { - new_test_ext().execute_with(|| { - // Block 1: No change - initialize_block(1); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 2: Session rollover, but no change. - initialize_block(2); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 3: Set new key for validator 2; no visible change. - initialize_block(3); - assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 4: Session rollover; no visible change. - initialize_block(4); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 5: No change. - initialize_block(5); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 6: Session rollover; authority 2 changes. - initialize_block(6); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(5), UintAuthorityId(3)]); - }); - } - - #[test] - fn duplicates_are_not_allowed() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Session::on_initialize(1); - assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_err()); - assert!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![]).is_ok()); - - // is fine now that 1 has migrated off. 
- assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_ok()); - }); - } - - #[test] - fn session_changed_flag_works() { - reset_before_session_end_called(); - - new_test_ext().execute_with(|| { - TEST_SESSION_CHANGED.with(|l| *l.borrow_mut() = true); - - force_new_session(); - initialize_block(1); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - force_new_session(); - initialize_block(2); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - Session::disable_index(0); - force_new_session(); - initialize_block(3); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - force_new_session(); - initialize_block(4); - assert!(session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - force_new_session(); - initialize_block(5); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); - force_new_session(); - initialize_block(6); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - // changing the keys of a validator leads to change. - assert_ok!(Session::set_keys(Origin::signed(69), UintAuthorityId(69).into(), vec![])); - force_new_session(); - initialize_block(7); - assert!(session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - // while changing the keys of a non-validator does not. - force_new_session(); - initialize_block(7); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - }); - } - - #[test] - fn periodic_session_works() { - struct Period; - struct Offset; - - impl Get for Period { - fn get() -> u64 { 10 } - } - - impl Get for Offset { - fn get() -> u64 { 3 } - } - - - type P = PeriodicSessions; - - for i in 0..3 { - assert!(!P::should_end_session(i)); - } - - assert!(P::should_end_session(3)); - - for i in (1..10).map(|i| 3 + i) { - assert!(!P::should_end_session(i)); - } - - assert!(P::should_end_session(13)); - } - - #[test] - fn session_keys_generate_output_works_as_set_keys_input() { - new_test_ext().execute_with(|| { - let new_keys = mock::MockSessionKeys::generate(None); - assert_ok!( - Session::set_keys( - Origin::signed(2), - ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), - vec![], - ) - ); - }); - } - - #[test] - fn return_true_if_more_than_third_is_disabled() { - new_test_ext().execute_with(|| { - set_next_validators(vec![1, 2, 3, 4, 5, 6, 7]); - force_new_session(); - initialize_block(1); - // apply the new validator set - force_new_session(); - initialize_block(2); - - assert_eq!(Session::disable_index(0), false); - assert_eq!(Session::disable_index(1), false); - assert_eq!(Session::disable_index(2), true); - assert_eq!(Session::disable_index(3), true); - }); +impl EstimateNextNewSession for Module { + /// This session module always calls new_session and next_session at the same time, hence we + /// do a simple proxy and pass the function to next rotation. 
+ fn estimate_next_new_session(now: T::BlockNumber) -> Option { + T::NextSessionRotation::estimate_next_session_rotation(now) } } diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 9d64285b900f8645d0f2d76cbe736bc7a908194f..a888dcfb28e992ba61881155fcc24d796cca085c 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -150,6 +150,16 @@ pub fn reset_before_session_end_called() { BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = false); } +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + GenesisConfig:: { + keys: NEXT_VALIDATORS.with(|l| + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() + ), + }.assimilate_storage(&mut t).unwrap(); + sp_io::TestExternalities::new(t) +} + #[derive(Clone, Eq, PartialEq)] pub struct Test; @@ -205,6 +215,7 @@ impl Trait for Test { type Keys = MockSessionKeys; type Event = (); type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = (); } #[cfg(feature = "historical")] diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..abfd9f738b614e2d9078ddfe02c07a1f3d862a32 --- /dev/null +++ b/frame/session/src/tests.rs @@ -0,0 +1,309 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +// Tests for the Session Pallet + +use super::*; +use frame_support::{traits::OnInitialize, assert_ok}; +use sp_core::crypto::key_types::DUMMY; +use sp_runtime::testing::UintAuthorityId; +use mock::{ + SESSION_CHANGED, TEST_SESSION_CHANGED, authorities, force_new_session, + set_next_validators, set_session_length, session_changed, Origin, System, Session, + reset_before_session_end_called, before_session_end_called, new_test_ext, +}; + +fn initialize_block(block: u64) { + SESSION_CHANGED.with(|l| *l.borrow_mut() = false); + System::set_block_number(block); + Session::on_initialize(block); +} + +#[test] +fn simple_setup_should_work() { + new_test_ext().execute_with(|| { + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + assert_eq!(Session::validators(), vec![1, 2, 3]); + }); +} + +#[test] +fn put_get_keys() { + new_test_ext().execute_with(|| { + Session::put_keys(&10, &UintAuthorityId(10).into()); + assert_eq!(Session::load_keys(&10), Some(UintAuthorityId(10).into())); + }) +} + +#[test] +fn keys_cleared_on_kill() { + let mut ext = new_test_ext(); + ext.execute_with(|| { + assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!(Session::load_keys(&1), Some(UintAuthorityId(1).into())); + + let id = DUMMY; + assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); + + assert!(!System::allow_death(&1)); + assert_ok!(Session::purge_keys(Origin::signed(1))); + assert!(System::allow_death(&1)); + + assert_eq!(Session::load_keys(&1), None); + assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), None); + }) +} + +#[test] +fn authorities_should_track_validators() { + reset_before_session_end_called(); + + new_test_ext().execute_with(|| { + set_next_validators(vec![1, 2]); + force_new_session(); + initialize_block(1); + assert_eq!(Session::queued_keys(), vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + ]); + assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + force_new_session(); + initialize_block(2); + assert_eq!(Session::queued_keys(), vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + ]); + assert_eq!(Session::validators(), vec![1, 2]); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + set_next_validators(vec![1, 2, 4]); + assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(4).into(), vec![])); + force_new_session(); + initialize_block(3); + assert_eq!(Session::queued_keys(), vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + (4, UintAuthorityId(4).into()), + ]); + assert_eq!(Session::validators(), vec![1, 2]); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); + assert!(before_session_end_called()); + + force_new_session(); + initialize_block(4); + assert_eq!(Session::queued_keys(), vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + (4, UintAuthorityId(4).into()), + ]); + assert_eq!(Session::validators(), vec![1, 2, 4]); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(4)]); + }); +} + +#[test] +fn should_work_with_early_exit() { + new_test_ext().execute_with(|| { + set_session_length(10); + + initialize_block(1); + assert_eq!(Session::current_index(), 0); + 
+ initialize_block(2); + assert_eq!(Session::current_index(), 0); + + force_new_session(); + initialize_block(3); + assert_eq!(Session::current_index(), 1); + + initialize_block(9); + assert_eq!(Session::current_index(), 1); + + initialize_block(10); + assert_eq!(Session::current_index(), 2); + }); +} + +#[test] +fn session_change_should_work() { + new_test_ext().execute_with(|| { + // Block 1: No change + initialize_block(1); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + + // Block 2: Session rollover, but no change. + initialize_block(2); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + + // Block 3: Set new key for validator 2; no visible change. + initialize_block(3); + assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + + // Block 4: Session rollover; no visible change. + initialize_block(4); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + + // Block 5: No change. + initialize_block(5); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + + // Block 6: Session rollover; authority 2 changes. + initialize_block(6); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(5), UintAuthorityId(3)]); + }); +} + +#[test] +fn duplicates_are_not_allowed() { + new_test_ext().execute_with(|| { + System::set_block_number(1); + Session::on_initialize(1); + assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_err()); + assert!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![]).is_ok()); + + // is fine now that 1 has migrated off. + assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_ok()); + }); +} + +#[test] +fn session_changed_flag_works() { + reset_before_session_end_called(); + + new_test_ext().execute_with(|| { + TEST_SESSION_CHANGED.with(|l| *l.borrow_mut() = true); + + force_new_session(); + initialize_block(1); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + force_new_session(); + initialize_block(2); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + Session::disable_index(0); + force_new_session(); + initialize_block(3); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + force_new_session(); + initialize_block(4); + assert!(session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + force_new_session(); + initialize_block(5); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); + force_new_session(); + initialize_block(6); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + // changing the keys of a validator leads to change. + assert_ok!(Session::set_keys(Origin::signed(69), UintAuthorityId(69).into(), vec![])); + force_new_session(); + initialize_block(7); + assert!(session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + // while changing the keys of a non-validator does not. 
+		force_new_session();
+		initialize_block(7);
+		assert!(!session_changed());
+		assert!(before_session_end_called());
+		reset_before_session_end_called();
+	});
+}
+
+#[test]
+fn periodic_session_works() {
+	struct Period;
+	struct Offset;
+
+	impl Get<u64> for Period {
+		fn get() -> u64 { 10 }
+	}
+
+	impl Get<u64> for Offset {
+		fn get() -> u64 { 3 }
+	}
+
+	type P = PeriodicSessions<Period, Offset>;
+
+	for i in 0..3 {
+		assert!(!P::should_end_session(i));
+	}
+
+	assert!(P::should_end_session(3));
+
+	for i in (1..10).map(|i| 3 + i) {
+		assert!(!P::should_end_session(i));
+	}
+
+	assert!(P::should_end_session(13));
+}
+
+#[test]
+fn session_keys_generate_output_works_as_set_keys_input() {
+	new_test_ext().execute_with(|| {
+		let new_keys = mock::MockSessionKeys::generate(None);
+		assert_ok!(
+			Session::set_keys(
+				Origin::signed(2),
+				<mock::Test as Trait>::Keys::decode(&mut &new_keys[..]).expect("Decode keys"),
+				vec![],
+			)
+		);
+	});
+}
+
+#[test]
+fn return_true_if_more_than_third_is_disabled() {
+	new_test_ext().execute_with(|| {
+		set_next_validators(vec![1, 2, 3, 4, 5, 6, 7]);
+		force_new_session();
+		initialize_block(1);
+		// apply the new validator set
+		force_new_session();
+		initialize_block(2);
+
+		assert_eq!(Session::disable_index(0), false);
+		assert_eq!(Session::disable_index(1), false);
+		assert_eq!(Session::disable_index(2), true);
+		assert_eq!(Session::disable_index(3), true);
+	});
+}
diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml
index 35b1c5c4a45159464b783ac201955b81ebd0d260..be419fb63fe2d0800dde5802e9ce1cf02de52b14 100644
--- a/frame/society/Cargo.toml
+++ b/frame/society/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-society"
-version = "2.0.0-alpha.3"
+version = "2.0.0-alpha.5"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "GPL-3.0"
@@ -10,17 +10,17 @@ description = "FRAME society pallet"
 [dependencies]
 serde = { version = "1.0.101", optional = true }
-codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] }
-sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"}
-sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" }
-sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" }
-frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" }
+codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] }
+sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.5"}
+sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" }
+sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" }
+frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" }
+frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" }
 rand_chacha = { version = "0.2", default-features = false }
 
 [dev-dependencies]
-sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" }
-pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" }
+sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" }
+pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" }
 
 [features]
 default = ["std"]
@@ -34,3 +34,10 @@ std = [
 	"frame-support/std",
"frame-system/std", ] +runtime-benchmarks = [ + "sp-runtime/runtime-benchmarks", + "frame-system/runtime-benchmarks", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index 38dc491b805b77e4dd1e5c92705a85b31d2243a6..2061c21d9c5f992cb6a6254918a9133d14483ca6 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -256,14 +256,14 @@ use codec::{Encode, Decode}; use sp_runtime::{Percent, ModuleId, RuntimeDebug, traits::{ StaticLookup, AccountIdConversion, Saturating, Zero, IntegerSquareRoot, Hash, - TrailingZeroInput, CheckedSub, EnsureOrigin + TrailingZeroInput, CheckedSub } }; use frame_support::{decl_error, decl_module, decl_storage, decl_event, ensure, dispatch::DispatchResult}; -use frame_support::weights::SimpleDispatchInfo; +use frame_support::weights::{SimpleDispatchInfo, Weight, WeighData}; use frame_support::traits::{ Currency, ReservableCurrency, Randomness, Get, ChangeMembers, BalanceStatus, - ExistenceRequirement::AllowDeath, + ExistenceRequirement::AllowDeath, EnsureOrigin }; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -414,7 +414,7 @@ decl_storage! { /// The set of suspended candidates. pub SuspendedCandidates get(suspended_candidate): - map hasher(blake2_256) T::AccountId + map hasher(twox_64_concat) T::AccountId => Option<(BalanceOf, BidKind>)>; /// Amount of our account balance that is specifically for the next round's bid(s). @@ -432,19 +432,19 @@ decl_storage! { }): Vec; /// The set of suspended members. - pub SuspendedMembers get(fn suspended_member): map hasher(blake2_256) T::AccountId => bool; + pub SuspendedMembers get(fn suspended_member): map hasher(twox_64_concat) T::AccountId => bool; /// The current bids, stored ordered by the value of the bid. Bids: Vec>>; /// Members currently vouching or banned from vouching again - Vouching get(fn vouching): map hasher(blake2_256) T::AccountId => Option; + Vouching get(fn vouching): map hasher(twox_64_concat) T::AccountId => Option; /// Pending payouts; ordered by block number, with the amount that should be paid out. - Payouts: map hasher(blake2_256) T::AccountId => Vec<(T::BlockNumber, BalanceOf)>; + Payouts: map hasher(twox_64_concat) T::AccountId => Vec<(T::BlockNumber, BalanceOf)>; /// The ongoing number of losing votes cast by the member. - Strikes: map hasher(blake2_256) T::AccountId => StrikeCount; + Strikes: map hasher(twox_64_concat) T::AccountId => StrikeCount; /// Double map from Candidate -> Voter -> (Maybe) Vote. Votes: double_map @@ -1028,7 +1028,7 @@ decl_module! { Self::deposit_event(RawEvent::NewMaxMembers(max)); } - fn on_initialize(n: T::BlockNumber) { + fn on_initialize(n: T::BlockNumber) -> Weight { let mut members = vec![]; // Run a candidate/membership rotation @@ -1045,6 +1045,8 @@ decl_module! { } Self::rotate_challenge(&mut members); } + + SimpleDispatchInfo::default().weigh_data(()) } } } @@ -1143,6 +1145,12 @@ impl EnsureOrigin for EnsureFounder { (r, _) => Err(T::Origin::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + let founder = Founder::::get().expect("society founder should exist"); + T::Origin::from(system::RawOrigin::Signed(founder)) + } } /// Pick an item at pseudo-random from the slice, given the `rng`. `None` iff the slice is empty. 
diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs
index 158f139df5673e9568e247122f40ed1d64c6b0c7..a66a5e6e047d44230b35f62fcf1a78043b5e1085 100644
--- a/frame/society/src/mock.rs
+++ b/frame/society/src/mock.rs
@@ -18,14 +18,16 @@ use super::*;
-use frame_support::{impl_outer_origin, parameter_types, ord_parameter_types};
+use frame_support::{
+	impl_outer_origin, parameter_types, ord_parameter_types, traits::{OnInitialize, OnFinalize}
+};
 use sp_core::H256;
 // The testing primitives are very useful for avoiding having to work with signatures
 // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required.
 use sp_runtime::{
 	Perbill, testing::Header,
-	traits::{BlakeTwo256, IdentityLookup, OnInitialize, OnFinalize},
+	traits::{BlakeTwo256, IdentityLookup},
 };
 use frame_system::EnsureSignedBy;
diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml
index ef4bb60a29e6a593d1c2f4560edbe6dd73537b43..aac5616e4ea18cd2ff64d9dc0a4fe0da46e24c00 100644
--- a/frame/staking/Cargo.toml
+++ b/frame/staking/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "pallet-staking"
-version = "2.0.0-alpha.3"
+version = "2.0.0-alpha.5"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "GPL-3.0"
@@ -10,31 +10,51 @@ description = "FRAME pallet staking"
 [dependencies]
 serde = { version = "1.0.101", optional = true }
-codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] }
-sp-keyring = { version = "2.0.0-alpha.2", optional = true, path = "../../primitives/keyring" }
-sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" }
-sp-phragmen = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/phragmen" }
-sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"}
-sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" }
-sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" }
-frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" }
-frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" }
-pallet-session = { version = "2.0.0-alpha.2", features = ["historical"], path = "../session", default-features = false }
-pallet-authorship = { version = "2.0.0-alpha.2", default-features = false, path = "../authorship" }
+codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] }
+sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" }
+sp-phragmen = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/phragmen" }
+sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.5"}
+sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" }
+sp-staking = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/staking" }
+frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" }
+frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" }
+pallet-session = { version = "2.0.0-alpha.5", features = ["historical"], path = "../session", default-features = false }
+pallet-authorship = { version = "2.0.0-alpha.5", default-features = false, path = "../authorship" }
+sp-application-crypto = { version = "2.0.0-alpha.4", default-features = false, path = "../../primitives/application-crypto" }
"2.0.0-alpha.4", default-features = false, path = "../../primitives/application-crypto" } +static_assertions = "1.1.0" + +# Optional imports for tesing-utils feature +pallet-indices = { version = "2.0.0-alpha.4", optional = true, path = "../indices", default-features = false } +sp-core = { version = "2.0.0-alpha.4", optional = true, path = "../../primitives/core", default-features = false } +rand = { version = "0.7.3", optional = true, default-features = false } + +# Optional imports for benchmarking +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } +rand_chacha = { version = "0.2", default-features = false, optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } -pallet-timestamp = { version = "2.0.0-alpha.2", path = "../timestamp" } -pallet-staking-reward-curve = { version = "2.0.0-alpha.2", path = "../staking/reward-curve" } -substrate-test-utils = { version = "2.0.0-alpha.2", path = "../../test-utils" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-storage = { version = "2.0.0-alpha.5", path = "../../primitives/storage" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } +pallet-timestamp = { version = "2.0.0-alpha.5", path = "../timestamp" } +pallet-staking-reward-curve = { version = "2.0.0-alpha.5", path = "../staking/reward-curve" } +substrate-test-utils = { version = "2.0.0-alpha.5", path = "../../test-utils" } +frame-benchmarking = { version = "2.0.0-alpha.5", path = "../benchmarking" } +rand_chacha = { version = "0.2" } +parking_lot = "0.10.0" +env_logger = "0.7.1" +hex = "0.4" [features] -migrate = [] +testing-utils = [ + "std", + "pallet-indices/std", + "sp-core/std", + "rand/std", +] default = ["std"] std = [ "serde", - "sp-keyring", "codec/std", "sp-std/std", "sp-phragmen/std", @@ -45,4 +65,13 @@ std = [ "pallet-session/std", "frame-system/std", "pallet-authorship/std", + "sp-application-crypto/std", + "sp-core/std", ] +runtime-benchmarks = [ + "rand_chacha", + "frame-benchmarking", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/staking/fuzz/.gitignore b/frame/staking/fuzz/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..572e03bdf321b6cc3a99488183436905cefd086d --- /dev/null +++ b/frame/staking/fuzz/.gitignore @@ -0,0 +1,4 @@ + +target +corpus +artifacts diff --git a/frame/staking/fuzz/Cargo.lock b/frame/staking/fuzz/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..f6e8cfa08d6675144979b3a5ef10eaa8f9dcf0d3 --- /dev/null +++ b/frame/staking/fuzz/Cargo.lock @@ -0,0 +1,2189 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + +[[package]] +name = "ahash" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f33b5018f120946c1dcf279194f238a9f146725593ead1c08fa47ff22b0b5d3" +dependencies = [ + "const-random", +] + +[[package]] +name = "aho-corasick" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" +dependencies = [ + "memchr", +] + +[[package]] +name = "arbitrary" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16971f2f0ce65c5cf2a1546cc6a0af102ecb11e265ddaa9433fb3e5bfdf676a4" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + +[[package]] +name = "arrayvec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" + +[[package]] +name = "autocfg" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" + +[[package]] +name = "autocfg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" + +[[package]] +name = "backtrace" +version = "0.3.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad235dabf00f36301792cfe82499880ba54c6486be094d1047b02bacb67c14e8" +dependencies = [ + "backtrace-sys", + "cfg-if", + "libc", + "rustc-demangle", +] + +[[package]] +name = "backtrace-sys" +version = "0.1.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e17b52e737c40a7d75abca20b29a19a0eb7ba9fc72c5a72dd282a0a3c2c0dc35" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "base58" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "bitmask" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5da9b3d9f6f585199287a473f4f8dfab6566cf827d15c00c219f53c645687ead" + +[[package]] +name = "bitvec" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993f74b4c99c1908d156b8d2e0fb6277736b0ecbd833982fd1241d39b2766a6" + +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + "arrayvec 0.4.12", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" 
+version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + "block-padding", + "byte-tools", + "byteorder", + "generic-array", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "bumpalo" +version = "3.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f359dc14ff8911330a51ef78022d376f25ed00248912803b58f00cb1c27f742" + +[[package]] +name = "byte-slice-cast" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "c2-chacha" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "214238caa1bf3a496ec3392968969cab8549f96ff30652c9e56885329315f6bb" +dependencies = [ + "ppv-lite86", +] + +[[package]] +name = "cc" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "clear_on_drop" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97276801e127ffb46b66ce23f35cc96bd454fa311294bced4bbace7baa8b1d17" +dependencies = [ + "cc", +] + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags", +] + +[[package]] +name = "const-random" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f1af9ac737b2dd2d577701e59fd09ba34822f6f2ebdb30a7647405d9e55e16a" +dependencies = [ + "const-random-macro", + "proc-macro-hack", +] + +[[package]] +name = "const-random-macro" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25e4c606eb459dd29f7c57b2e0879f2b6f14ee130918c2b78ccb58a9624e6c7a" +dependencies = [ + "getrandom", + "proc-macro-hack", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + 
"generic-array", + "subtle 1.0.0", +] + +[[package]] +name = "curve25519-dalek" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b7dcd30ba50cdf88b55b033456138b7c0ac4afdc436d82e1b79f370f24cc66d" +dependencies = [ + "byteorder", + "clear_on_drop", + "digest", + "rand_core 0.3.1", + "subtle 2.2.2", +] + +[[package]] +name = "curve25519-dalek" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" +dependencies = [ + "byteorder", + "digest", + "rand_core 0.5.1", + "subtle 2.2.2", + "zeroize 1.1.0", +] + +[[package]] +name = "derive_more" +version = "0.99.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a806e96c59a76a5ba6e18735b6cf833344671e61e7863f2edb5c518ea2cac95c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.0-pre.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" +dependencies = [ + "clear_on_drop", + "curve25519-dalek 2.0.0", + "rand 0.7.3", + "sha2", +] + +[[package]] +name = "environmental" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "516aa8d7a71cb00a1c4146f0798549b93d083d4f189b3ced8f3de6b8f11ee6c4" + +[[package]] +name = "failure" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" +dependencies = [ + "backtrace", + "failure_derive", +] + +[[package]] +name = "failure_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + +[[package]] +name = "fixed-hash" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3367952ceb191f4ab95dd5685dc163ac539e36202f9fcfd0cb22f9f9c542fefc" +dependencies = [ + "byteorder", + "libc", + "rand 0.7.3", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "frame-benchmarking" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "sp-api", + "sp-io", + "sp-runtime", + "sp-runtime-interface", + "sp-std", +] + +[[package]] +name = "frame-metadata" +version = "11.0.0-alpha.3" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-std", +] + +[[package]] +name = "frame-support" +version = "2.0.0-alpha.3" +dependencies = [ + "bitmask", + "frame-metadata", + "frame-support-procedural", + "impl-trait-for-tuples", + "log", + "once_cell", + "parity-scale-codec", + "paste", + "serde", + "sp-arithmetic", + "sp-core", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-state-machine", + "sp-std", + "tracing", +] + +[[package]] +name = "frame-support-procedural" +version = "2.0.0-alpha.3" +dependencies = [ + 
"frame-support-procedural-tools", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-support-procedural-tools-derive", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-support-procedural-tools-derive" +version = "2.0.0-alpha.3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "frame-system" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-support", + "impl-trait-for-tuples", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", + "sp-version", +] + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + +[[package]] +name = "futures" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c329ae8753502fb44ae4fc2b622fa2a94652c41e795143765ba0927f92ab780" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c77d04ce8edd9cb903932b608268b3fffec4163dc053b3b402bf47eac1f1a8" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f25592f769825e89b92358db00d26f965761e094951ac44d3663ef25b7ac464a" + +[[package]] +name = "futures-executor" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f674f3e1bcb15b37284a90cedf55afdba482ab061c407a9c0ebbd0f3109741ba" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a638959aa96152c7a4cddf50fcb1e3fede0583b27157c26e67d6f99904090dc6" + +[[package]] +name = "futures-macro" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3466821b4bc114d95b087b850a724c6f83115e929bc88f1fa98a3304a944c8a6" + +[[package]] +name = "futures-task" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b0a34e53cf6cdcd0178aa573aed466b646eb3db769570841fda0c7ede375a27" + +[[package]] +name = "futures-util" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22766cf25d64306bedf0384da004d05c9974ab104fcc4528f1236181c18004c5" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-utils", + "proc-macro-hack", + "proc-macro-nested", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.1.14" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hash-db" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" +dependencies = [ + "ahash", + "autocfg 0.1.7", +] + +[[package]] +name = "heck" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20564e78d53d2bb135c343b3f47714a56af2061f1c928fdb541dc7b9fdd94205" +dependencies = [ + "unicode-segmentation", +] + +[[package]] +name = "hex" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" + +[[package]] +name = "hmac" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +dependencies = [ + "crypto-mac", + "digest", +] + +[[package]] +name = "hmac-drbg" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +dependencies = [ + "digest", + "generic-array", + "hmac", +] + +[[package]] +name = "impl-codec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-serde" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58e3cae7e99c7ff5a995da2cf78dd0a5383740eda71d98cf7b1910c301ac69b8" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-serde" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bbe9ea9b182f0fb1cabbd61f4ff9b7b7b9197955e95a7e4c27de5055eb29ff8" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "integer-sqrt" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" + +[[package]] +name = "js-sys" +version = "0.3.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1cb931d43e71f560c81badb0191596562bafad2be06a3f9025b845c847c60df5" +dependencies = [ + "wasm-bindgen", +] + +[[package]] +name = "keccak" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" + +[[package]] +name = "libfuzzer-sys" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb789afcc589a08928d1e466087445ab740a0f70a2ee23d9349a0e3723d65e1b" +dependencies = [ + "arbitrary", + "cc", +] + +[[package]] +name = "libsecp256k1" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +dependencies = [ + "arrayref", + "crunchy", + "digest", + "hmac-drbg", + "rand 0.7.3", + "sha2", + "subtle 2.2.2", + "typenum", +] + +[[package]] +name = "lock_api" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + +[[package]] +name = "memchr" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" + +[[package]] +name = "memory-db" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "198831fe8722331a395bc199a5d08efbc197497ef354cb4c77b969c02ffc0fc4" +dependencies = [ + "ahash", + "hash-db", + "hashbrown", + "parity-util-mem", +] + +[[package]] +name = "memory_units" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" + +[[package]] +name = "merlin" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b0942b357c1b4d0dc43ba724674ec89c3218e6ca2b3e8269e7cb53bcecd2f6e" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.4.2", + "zeroize 1.1.0", +] + +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg 1.0.0", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" +dependencies = [ + "autocfg 1.0.0", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da4dc79f9e6c81bef96148c8f6b8e72ad4541caa4a24373e900a36da07de03a3" +dependencies = [ + "autocfg 1.0.0", + "num-bigint", + "num-integer", + "num-traits", 
+] + +[[package]] +name = "num-traits" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" +dependencies = [ + "autocfg 1.0.0", +] + +[[package]] +name = "once_cell" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" +dependencies = [ + "parking_lot 0.9.0", +] + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "pallet-authorship" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-authorship", + "sp-core", + "sp-inherents", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-balances" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "parity-scale-codec", + "serde", + "sp-io", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-indices" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-keyring", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-session" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "pallet-timestamp", + "parity-scale-codec", + "serde", + "sp-io", + "sp-runtime", + "sp-staking", + "sp-std", + "sp-trie", +] + +[[package]] +name = "pallet-staking" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-support", + "frame-system", + "pallet-authorship", + "pallet-indices", + "pallet-session", + "parity-scale-codec", + "rand 0.7.3", + "serde", + "sp-application-crypto", + "sp-core", + "sp-io", + "sp-phragmen", + "sp-runtime", + "sp-staking", + "sp-std", + "static_assertions", +] + +[[package]] +name = "pallet-staking-fuzz" +version = "0.0.0" +dependencies = [ + "frame-support", + "frame-system", + "libfuzzer-sys", + "pallet-balances", + "pallet-indices", + "pallet-session", + "pallet-staking", + "pallet-staking-reward-curve", + "pallet-timestamp", + "parity-scale-codec", + "rand 0.7.3", + "sp-core", + "sp-io", + "sp-phragmen", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "pallet-staking-reward-curve" +version = "2.0.0-alpha.3" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pallet-timestamp" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "impl-trait-for-tuples", + "parity-scale-codec", + "serde", + "sp-inherents", + "sp-runtime", + "sp-std", + "sp-timestamp", +] + +[[package]] +name = "parity-scale-codec" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f509c5e67ca0605ee17dcd3f91ef41cadd685c75a298fb6261b781a5acb3f910" +dependencies = [ + "arrayvec 0.5.1", + "bitvec", + "byte-slice-cast", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a0ec292e92e8ec7c58e576adacc1e3f399c597c8f263c42f18420abe58e7245" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-util-mem" +version = 
"0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1476e40bf8f5c6776e9600983435821ca86eb9819d74a6207cca69d091406a" +dependencies = [ + "cfg-if", + "impl-trait-for-tuples", + "parity-util-mem-derive", + "parking_lot 0.10.0", + "primitive-types", + "winapi", +] + +[[package]] +name = "parity-util-mem-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" +dependencies = [ + "proc-macro2", + "syn", + "synstructure", +] + +[[package]] +name = "parity-wasm" +version = "0.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" + +[[package]] +name = "parking_lot" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" +dependencies = [ + "lock_api", + "parking_lot_core 0.6.2", + "rustc_version", +] + +[[package]] +name = "parking_lot" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc" +dependencies = [ + "lock_api", + "parking_lot_core 0.7.0", +] + +[[package]] +name = "parking_lot_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" +dependencies = [ + "cfg-if", + "cloudabi", + "libc", + "redox_syscall", + "rustc_version", + "smallvec 0.6.13", + "winapi", +] + +[[package]] +name = "parking_lot_core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7582838484df45743c8434fbff785e8edf260c28748353d44bc0da32e0ceabf1" +dependencies = [ + "cfg-if", + "cloudabi", + "libc", + "redox_syscall", + "smallvec 1.3.0", + "winapi", +] + +[[package]] +name = "paste" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e1afe738d71b1ebab5f1207c055054015427dbfc7bbe9ee1266894156ec046" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d4dc4a7f6f743211c5aab239640a65091535d97d43d92a52bca435a640892bb" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pbkdf2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" +dependencies = [ + "byteorder", + "crypto-mac", +] + +[[package]] +name = "pin-utils" +version = "0.1.0-alpha.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5894c618ce612a3fa23881b152b608bafb8c56cfc22f434a3ba3120b40f7b587" + +[[package]] +name = "ppv-lite86" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" + +[[package]] +name = "primitive-types" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4336f4f5d5524fa60bcbd6fe626f9223d8142a50e7053e979acdf0da41ab975" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde 0.3.0", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "0.1.4" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10d4b51f154c8a7fb96fd6dad097cb74b863943ec010ac94b9fd1be8861fe1e" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "proc-macro-nested" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" + +[[package]] +name = "proc-macro2" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" +dependencies = [ + "autocfg 0.1.7", + "libc", + "rand_chacha 0.1.1", + "rand_core 0.4.2", + "rand_hc 0.1.0", + "rand_isaac", + "rand_jitter", + "rand_os", + "rand_pcg", + "rand_xorshift", + "winapi", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + "rand_chacha 0.2.1", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand_chacha" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +dependencies = [ + "autocfg 0.1.7", + "rand_core 0.3.1", +] + +[[package]] +name = "rand_chacha" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03a2a90da8c7523f554344f921aa97283eadf6ac484a6d2a7d0212fa7f8d6853" +dependencies = [ + "c2-chacha", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rand_isaac" +version = "0.1.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rand_jitter" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" +dependencies = [ + "libc", + "rand_core 0.4.2", + "winapi", +] + +[[package]] +name = "rand_os" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" +dependencies = [ + "cloudabi", + "fuchsia-cprng", + "libc", + "rand_core 0.4.2", + "rdrand", + "winapi", +] + +[[package]] +name = "rand_pcg" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" +dependencies = [ + "autocfg 0.1.7", + "rand_core 0.4.2", +] + +[[package]] +name = "rand_xorshift" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "redox_syscall" +version = "0.1.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" + +[[package]] +name = "regex" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-syntax" +version = "0.6.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1132f845907680735a84409c3bebc64d1364a5683ffbce899550cd09d5eaefc1" + +[[package]] +name = "rustc-demangle" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "schnorrkel" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eacd8381b3c37840c9c9f40472af529e49975bdcbc24f83c31059fd6539023d3" +dependencies = [ + "curve25519-dalek 1.2.3", + "failure", + "merlin", + "rand 0.6.5", + "rand_core 0.4.2", + "rand_os", + "sha2", + "subtle 2.2.2", + "zeroize 0.9.3", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "send_wrapper" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0eddf2e8f50ced781f288c19f18621fa72a3779e3cb58dbf23b07469b0abeb4" + +[[package]] +name = "serde" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sha2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" +dependencies = [ + "block-buffer", + "digest", + "fake-simd", + "opaque-debug", +] + +[[package]] +name = "slab" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c111b5bd5695e56cffe5129854aa230b39c93a305372fdbb2668ca2394eea9f8" + +[[package]] +name = "smallvec" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" +dependencies = [ + "maybe-uninit", +] + +[[package]] +name = "smallvec" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" + +[[package]] +name = "sp-api" +version = "2.0.0-alpha.3" +dependencies = [ + "hash-db", + "parity-scale-codec", + "sp-api-proc-macro", + "sp-core", + "sp-runtime", + "sp-state-machine", + "sp-std", + "sp-version", +] + +[[package]] +name = "sp-api-proc-macro" +version = "2.0.0-alpha.3" +dependencies = [ + "blake2-rfc", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-application-crypto" +version = "2.0.0-alpha.3" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-arithmetic" +version = "2.0.0-alpha.3" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-authorship" +version = "2.0.0-alpha.3" +dependencies = [ + "parity-scale-codec", + "sp-inherents", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-core" +version = "2.0.0-alpha.3" +dependencies = [ + "base58", + "blake2-rfc", + "byteorder", + "ed25519-dalek", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde 0.3.0", + "lazy_static", + "libsecp256k1", + "log", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot 0.10.0", + "primitive-types", + "rand 0.7.3", + "regex", + "rustc-hex", + "schnorrkel", + "serde", + "sha2", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std", + "sp-storage", + 
"substrate-bip39", + "tiny-bip39", + "tiny-keccak", + "twox-hash", + "wasmi", + "zeroize 1.1.0", +] + +[[package]] +name = "sp-debug-derive" +version = "2.0.0-alpha.3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-externalities" +version = "0.8.0-alpha.3" +dependencies = [ + "environmental", + "sp-std", + "sp-storage", +] + +[[package]] +name = "sp-inherents" +version = "2.0.0-alpha.3" +dependencies = [ + "derive_more", + "parity-scale-codec", + "parking_lot 0.10.0", + "sp-core", + "sp-std", +] + +[[package]] +name = "sp-io" +version = "2.0.0-alpha.3" +dependencies = [ + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec", + "sp-core", + "sp-externalities", + "sp-runtime-interface", + "sp-state-machine", + "sp-std", + "sp-trie", + "sp-wasm-interface", +] + +[[package]] +name = "sp-keyring" +version = "2.0.0-alpha.3" +dependencies = [ + "lazy_static", + "sp-core", + "sp-runtime", + "strum", +] + +[[package]] +name = "sp-panic-handler" +version = "2.0.0-alpha.3" +dependencies = [ + "backtrace", + "log", +] + +[[package]] +name = "sp-phragmen" +version = "2.0.0-alpha.3" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-phragmen-compact", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-phragmen-compact" +version = "2.0.0-dev" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-runtime" +version = "2.0.0-alpha.3" +dependencies = [ + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "parity-util-mem", + "paste", + "rand 0.7.3", + "serde", + "sp-application-crypto", + "sp-arithmetic", + "sp-core", + "sp-inherents", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-runtime-interface" +version = "2.0.0-alpha.3" +dependencies = [ + "parity-scale-codec", + "primitive-types", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std", + "sp-wasm-interface", + "static_assertions", +] + +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "2.0.0-alpha.3" +dependencies = [ + "Inflector", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-staking" +version = "2.0.0-alpha.3" +dependencies = [ + "parity-scale-codec", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-state-machine" +version = "0.8.0-alpha.3" +dependencies = [ + "hash-db", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.10.0", + "rand 0.7.3", + "sp-core", + "sp-externalities", + "sp-panic-handler", + "sp-trie", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-std" +version = "2.0.0-alpha.3" + +[[package]] +name = "sp-storage" +version = "2.0.0-alpha.3" +dependencies = [ + "impl-serde 0.2.3", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-timestamp" +version = "2.0.0-alpha.3" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-api", + "sp-inherents", + "sp-runtime", + "sp-std", + "wasm-timer", +] + +[[package]] +name = "sp-trie" +version = "2.0.0-alpha.3" +dependencies = [ + "hash-db", + "memory-db", + "parity-scale-codec", + "sp-core", + "sp-std", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-version" +version = "2.0.0-alpha.3" +dependencies = [ + "impl-serde 0.2.3", + "parity-scale-codec", + "serde", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-wasm-interface" +version = "2.0.0-alpha.3" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-std", + "wasmi", +] + +[[package]] +name = 
"static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "strum" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6138f8f88a16d90134763314e3fc76fa3ed6a7db4725d6acf9a3ef95a3188d22" +dependencies = [ + "strum_macros", +] + +[[package]] +name = "strum_macros" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "substrate-bip39" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be511be555a3633e71739a79e4ddff6a6aaa6579fa6114182a51d72c3eb93c5" +dependencies = [ + "hmac", + "pbkdf2", + "schnorrkel", + "sha2", +] + +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "subtle" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" + +[[package]] +name = "syn" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "tiny-bip39" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6848cd8f566953ce1e8faeba12ee23cbdbb0437754792cd857d44628b5685e3" +dependencies = [ + "failure", + "hmac", + "once_cell", + "pbkdf2", + "rand 0.7.3", + "rustc-hash", + "sha2", + "unicode-normalization", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2953ca5148619bc99695c1274cb54c5275bbb913c6adad87e72eaf8db9787f69" +dependencies = [ + "crunchy", +] + +[[package]] +name = "toml" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +dependencies = [ + "serde", +] + +[[package]] +name = "tracing" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1721cc8cf7d770cc4257872507180f35a4797272f5962f24c806af9e7faf52ab" +dependencies = [ + "cfg-if", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7fbad39da2f9af1cae3016339ad7f2c7a9e870f12e8fd04c4fd7ef35b30c0d2b" +dependencies = [ + "quote", + "syn", +] + +[[package]] +name = "tracing-core" +version = "0.1.10" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "0aa83a9a47081cd522c09c81b31aec2c9273424976f922ad61c053b58350b715" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "trie-db" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de9222c50cc325855621271157c973da27a0dcd26fa06f8edf81020bd2333df0" +dependencies = [ + "hash-db", + "hashbrown", + "log", + "rustc-hex", + "smallvec 1.3.0", +] + +[[package]] +name = "trie-root" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" +dependencies = [ + "hash-db", +] + +[[package]] +name = "twox-hash" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" +dependencies = [ + "rand 0.7.3", +] + +[[package]] +name = "typenum" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" + +[[package]] +name = "uint" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e75a4cdd7b87b28840dba13c483b9a88ee6bbf16ba5c951ee1ecfcf723078e0d" +dependencies = [ + "byteorder", + "crunchy", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" +dependencies = [ + "smallvec 1.3.0", +] + +[[package]] +name = "unicode-segmentation" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e83e153d1053cbb5a118eeff7fd5be06ed99153f00dbcd8ae310c5fb2b22edc0" + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasm-bindgen" +version = "0.2.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3557c397ab5a8e347d434782bcd31fc1483d927a6826804cec05cc792ee2519d" +dependencies = [ + "cfg-if", + "wasm-bindgen-macro", +] + +[[package]] +name = "wasm-bindgen-backend" +version = "0.2.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0da9c9a19850d3af6df1cb9574970b566d617ecfaf36eb0b706b6f3ef9bd2f8" +dependencies = [ + "bumpalo", + "lazy_static", + "log", + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "457414a91863c0ec00090dba537f88ab955d93ca6555862c29b6d860990b8a8a" +dependencies = [ + "cfg-if", + "js-sys", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f6fde1d36e75a714b5fe0cffbb78978f222ea6baebb726af13c78869fdb4205" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.59" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "25bda4168030a6412ea8a047e27238cadf56f0e53516e1e83fec0a8b7c786f6d" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "wasm-bindgen-backend", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.59" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc9f36ad51f25b0219a3d4d13b90eb44cd075dff8b6280cca015775d7acaddd8" + +[[package]] +name = "wasm-timer" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "324c5e65a08699c9c4334ba136597ab22b85dccd4b65dd1e36ccf8f723a95b54" +dependencies = [ + "futures", + "js-sys", + "parking_lot 0.9.0", + "pin-utils", + "send_wrapper", + "wasm-bindgen", + "wasm-bindgen-futures", + "web-sys", +] + +[[package]] +name = "wasmi" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" +dependencies = [ + "libc", + "memory_units", + "num-rational", + "num-traits", + "parity-wasm", + "wasmi-validation", +] + +[[package]] +name = "wasmi-validation" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" +dependencies = [ + "parity-wasm", +] + +[[package]] +name = "web-sys" +version = "0.3.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "721c6263e2c66fd44501cc5efbfa2b7dfa775d13e4ea38c46299646ed1f9c70a" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "zeroize" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45af6a010d13e4cf5b54c94ba5a2b2eba5596b9e46bf5875612d332a1f2b3f86" + +[[package]] +name = "zeroize" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] diff --git a/frame/staking/fuzz/Cargo.toml b/frame/staking/fuzz/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..a78fbf17dc8a3a1dd3c8572167d8d1d27692059a --- /dev/null +++ b/frame/staking/fuzz/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "pallet-staking-fuzz" +version = "0.0.0" +authors = ["Automatically generated"] +publish = false +edition = "2018" + +[package.metadata] +cargo-fuzz = true + +[dependencies] +libfuzzer-sys = "0.3" +codec = { package = 
"parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +pallet-staking = { version = "2.0.0-alpha.2", path = "..", features = ["testing-utils"] } +pallet-staking-reward-curve = { version = "2.0.0-alpha.2", path = "../reward-curve" } +pallet-session = { version = "2.0.0-alpha.2", path = "../../session" } +pallet-indices = { version = "2.0.0-alpha.2", path = "../../indices" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../../balances" } +pallet-timestamp = { version = "2.0.0-alpha.2", path = "../../timestamp" } +frame-system = { version = "2.0.0-alpha.2", path = "../../system" } +frame-support = { version = "2.0.0-alpha.2", path = "../../support" } +sp-std = { version = "2.0.0-alpha.2", path = "../../../primitives/std" } +sp-io ={ version = "2.0.0-alpha.2", path = "../../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-phragmen = { version = "2.0.0-alpha.2", path = "../../../primitives/phragmen" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +rand = "0.7.3" + +# Prevent this from interfering with workspaces +[workspace] +members = ["."] + +[[bin]] +name = "submit_solution" +path = "fuzz_targets/submit_solution.rs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/staking/fuzz/fuzz_targets/mock.rs b/frame/staking/fuzz/fuzz_targets/mock.rs new file mode 100644 index 0000000000000000000000000000000000000000..4bb3437f92368b35b7bfd2ef4f4529214972e6ca --- /dev/null +++ b/frame/staking/fuzz/fuzz_targets/mock.rs @@ -0,0 +1,182 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Mock file for staking fuzzing. + +use sp_runtime::traits::{Convert, SaturatedConversion}; +use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types}; + +type AccountId = u64; +type AccountIndex = u32; +type BlockNumber = u64; +type Balance = u64; + +type System = frame_system::Module; +type Balances = pallet_balances::Module; +type Staking = pallet_staking::Module; +type Indices = pallet_indices::Module; +type Session = pallet_session::Module; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +impl_outer_dispatch! 
{ + pub enum Call for Test where origin: Origin { + staking::Staking, + } +} + +pub struct CurrencyToVoteHandler; +impl Convert for CurrencyToVoteHandler { + fn convert(x: u64) -> u64 { + x + } +} +impl Convert for CurrencyToVoteHandler { + fn convert(x: u128) -> u64 { + x.saturated_into() + } +} + +#[derive(Clone, Eq, PartialEq, Debug)] +pub struct Test; + +impl frame_system::Trait for Test { + type Origin = Origin; + type Index = AccountIndex; + type BlockNumber = BlockNumber; + type Call = Call; + type Hash = sp_core::H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = Indices; + type Header = sp_runtime::testing::Header; + type Event = (); + type BlockHashCount = (); + type MaximumBlockWeight = (); + type AvailableBlockRatio = (); + type MaximumBlockLength = (); + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (Balances,); +} +parameter_types! { + pub const ExistentialDeposit: Balance = 10; +} +impl pallet_balances::Trait for Test { + type Balance = Balance; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} +impl pallet_indices::Trait for Test { + type AccountIndex = AccountIndex; + type Event = (); + type Currency = Balances; + type Deposit = (); +} +parameter_types! { + pub const MinimumPeriod: u64 = 5; +} +impl pallet_timestamp::Trait for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; +} +impl pallet_session::historical::Trait for Test { + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; +} + +sp_runtime::impl_opaque_keys! { + pub struct SessionKeys { + pub foo: sp_runtime::testing::UintAuthorityId, + } +} + +pub struct TestSessionHandler; +impl pallet_session::SessionHandler for TestSessionHandler { + const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + + fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} + + fn on_new_session( + _: bool, + _: &[(AccountId, Ks)], + _: &[(AccountId, Ks)], + ) {} + + fn on_disabled(_: usize) {} +} + +impl pallet_session::Trait for Test { + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; + type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; + type SessionHandler = TestSessionHandler; + type Event = (); + type ValidatorId = AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type DisabledValidatorsThreshold = (); +} +pallet_staking_reward_curve::build! { + const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); +} +parameter_types! 
{ + pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; + pub const MaxNominatorRewardedPerValidator: u32 = 64; +} + +pub type Extrinsic = sp_runtime::testing::TestXt; +type SubmitTransaction = frame_system::offchain::TransactionSubmitter< + sp_runtime::testing::UintAuthorityId, + Test, + Extrinsic, +>; + +impl pallet_staking::Trait for Test { + type Currency = Balances; + type Time = pallet_timestamp::Module; + type CurrencyToVote = CurrencyToVoteHandler; + type RewardRemainder = (); + type Event = (); + type Slash = (); + type Reward = (); + type SessionsPerEra = (); + type SlashDeferDuration = (); + type SlashCancelOrigin = frame_system::EnsureRoot; + type BondingDuration = (); + type SessionInterface = Self; + type RewardCurve = RewardCurve; + type NextNewSession = Session; + type ElectionLookahead = (); + type Call = Call; + type SubmitTransaction = SubmitTransaction; + type KeyType = sp_runtime::testing::UintAuthorityId; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; +} diff --git a/frame/staking/fuzz/fuzz_targets/submit_solution.rs b/frame/staking/fuzz/fuzz_targets/submit_solution.rs new file mode 100644 index 0000000000000000000000000000000000000000..5d1fcf1d7ea850dd033b89e41514cd672848ab24 --- /dev/null +++ b/frame/staking/fuzz/fuzz_targets/submit_solution.rs @@ -0,0 +1,130 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Fuzzing for staking pallet. + +#![no_main] +use libfuzzer_sys::fuzz_target; +use mock::Test; +use pallet_staking::testing_utils::{ + self, USER, get_seq_phragmen_solution, get_weak_solution, setup_chain_stakers, + set_validator_count, signed_account, +}; +use frame_support::assert_ok; +use sp_runtime::{traits::Dispatchable, DispatchError}; + +mod mock; + +#[repr(u32)] +#[allow(dead_code)] +#[derive(Debug, Clone, Copy)] +enum Mode { + /// Initial submission. This will be rather cheap. + InitialSubmission, + /// A better submission that will replace the previous ones. This is the most expensive. + StrongerSubmission, + /// A weak submission that will be rejected. This will be rather cheap. 
+ WeakerSubmission, +} + +pub fn new_test_ext() -> Result { + frame_system::GenesisConfig::default().build_storage::().map(Into::into) +} + +fuzz_target!(|do_reduce: bool| { + let ext = new_test_ext(); + let mode: Mode = unsafe { std::mem::transmute(testing_utils::random(0, 2)) }; + let num_validators = testing_utils::random(50, 500); + let num_nominators = testing_utils::random(200, 2000); + let edge_per_voter = testing_utils::random(1, 16); + let to_elect = testing_utils::random(10, num_validators); + + println!("+++ instance with params {} / {} / {} / {:?} / {}", + num_nominators, + num_validators, + edge_per_voter, + mode, + to_elect, + ); + + ext.unwrap_or_default().execute_with(|| { + // initial setup + set_validator_count::(to_elect); + setup_chain_stakers::( + num_validators, + num_nominators, + edge_per_voter, + ); + + println!("++ Chain setup done."); + + // stuff to submit + let (winners, compact, score) = match mode { + Mode::InitialSubmission => { + /* No need to setup anything */ + get_seq_phragmen_solution::(do_reduce) + }, + Mode::StrongerSubmission => { + let (winners, compact, score) = get_weak_solution::(false); + assert_ok!( + >::submit_election_solution( + signed_account::(USER), + winners, + compact, + score, + ) + ); + get_seq_phragmen_solution::(do_reduce) + }, + Mode::WeakerSubmission => { + let (winners, compact, score) = get_seq_phragmen_solution::(do_reduce); + assert_ok!( + >::submit_election_solution( + signed_account::(USER), + winners, + compact, + score, + ) + ); + get_weak_solution::(false) + } + }; + + println!("++ Submission ready."); + + // must have chosen correct number of winners. + assert_eq!(winners.len() as u32, >::validator_count()); + + // final call and origin + let call = pallet_staking::Call::::submit_election_solution( + winners, + compact, + score, + ); + let caller = signed_account::(USER); + + // actually submit + match mode { + Mode::WeakerSubmission => { + assert_eq!( + call.dispatch(caller.into()).unwrap_err(), + DispatchError::Module { index: 0, error: 11, message: Some("PhragmenWeakSubmission") }, + ); + }, + _ => assert_ok!(call.dispatch(caller.into())), + }; + }) +}); diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index d55813682e62192a28d8c68992524fea41d8aa6d..b3b749e96cd51fa050dfb6233f0c99eadf6ad3fb 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-staking-reward-curve" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -18,4 +18,7 @@ proc-macro2 = "1.0.6" proc-macro-crate = "0.1.4" [dev-dependencies] -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/staking/reward-curve/src/log.rs b/frame/staking/reward-curve/src/log.rs index 1a25dbb98628bd1b526fea508158490945d2a6da..e0929a95970139450c0e953f6df7e8849e5ae727 100644 --- a/frame/staking/reward-curve/src/log.rs +++ b/frame/staking/reward-curve/src/log.rs @@ -14,13 +14,13 @@ pub fn log2(p: u32, q: u32) -> u32 { } let mut n = 0u32; - while !(p >= 2u32.pow(n)*q) || !(p < 2u32.pow(n+1)*q) { + while !(p >= (1u32 << n)*q) || !(p < (1u32 << (n+1))*q) { n += 1; } - assert!(p < 2u32.pow(n+1) * q); + assert!(p < (1u32 << (n+1)) * q); - let y_num: u32 = (p - 2u32.pow(n) * 
q).try_into().unwrap(); - let y_den: u32 = (p + 2u32.pow(n) * q).try_into().unwrap(); + let y_num: u32 = (p - (1u32 << n) * q).try_into().unwrap(); + let y_den: u32 = (p + (1u32 << n) * q).try_into().unwrap(); let _2_div_ln_2 = 2_885_390u32; diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..2686623aa1eae85655beaf9a917679d1e483319f --- /dev/null +++ b/frame/staking/src/benchmarking.rs @@ -0,0 +1,457 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Staking pallet benchmarking. + +use super::*; + +use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; + +use sp_runtime::traits::{Dispatchable, One}; +use sp_io::hashing::blake2_256; + +use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, account}; + +use crate::Module as Staking; +use frame_system::Module as System; + +const SEED: u32 = 0; + +fn create_funded_user(string: &'static str, n: u32) -> T::AccountId { + let user = account(string, n, SEED); + let balance = T::Currency::minimum_balance() * 100.into(); + T::Currency::make_free_balance_be(&user, balance); + user +} + +pub fn create_stash_controller(n: u32) -> Result<(T::AccountId, T::AccountId), &'static str> { + let stash = create_funded_user::("stash", n); + let controller = create_funded_user::("controller", n); + let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); + let reward_destination = RewardDestination::Staked; + let amount = T::Currency::minimum_balance() * 10.into(); + Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, reward_destination)?; + return Ok((stash, controller)) +} + +fn create_validators(max: u32) -> Result::Source>, &'static str> { + let mut validators: Vec<::Source> = Vec::with_capacity(max as usize); + for i in 0 .. max { + let (stash, controller) = create_stash_controller::(i)?; + let validator_prefs = ValidatorPrefs { + commission: Perbill::from_percent(50), + }; + Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; + let stash_lookup: ::Source = T::Lookup::unlookup(stash); + validators.push(stash_lookup); + } + Ok(validators) +} + +// This function generates v validators and n nominators who are randomly nominating up to MAX_NOMINATIONS. +pub fn create_validators_with_nominators_for_era(v: u32, n: u32) -> Result<(), &'static str> { + let mut validators: Vec<::Source> = Vec::with_capacity(v as usize); + let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256)); + + // Create v validators + for i in 0 .. 
v { + let (v_stash, v_controller) = create_stash_controller::(i)?; + let validator_prefs = ValidatorPrefs { + commission: Perbill::from_percent(50), + }; + Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; + let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); + validators.push(stash_lookup.clone()); + } + + // Create n nominators + for j in 0 .. n { + let (_n_stash, n_controller) = create_stash_controller::(u32::max_value() - j)?; + + // Have them randomly validate + let mut available_validators = validators.clone(); + let mut selected_validators: Vec<::Source> = Vec::with_capacity(MAX_NOMINATIONS); + for _ in 0 .. v.min(MAX_NOMINATIONS as u32) { + let selected = rng.next_u32() as usize % available_validators.len(); + let validator = available_validators.remove(selected); + selected_validators.push(validator); + } + Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), selected_validators)?; + } + + ValidatorCount::put(v); + + Ok(()) +} + +// This function generates one validator being nominated by n nominators, and returns +//the validator stash account. It also starts an era and creates pending payouts. +pub fn create_validator_with_nominators(n: u32, upper_bound: u32) -> Result { + let mut points_total = 0; + let mut points_individual = Vec::new(); + + MinimumValidatorCount::put(0); + + let (v_stash, v_controller) = create_stash_controller::(0)?; + let validator_prefs = ValidatorPrefs { + commission: Perbill::from_percent(50), + }; + Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; + let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); + + points_total += 10; + points_individual.push((v_stash.clone(), 10)); + + // Give the validator n nominators, but keep total users in the system the same. + for i in 0 .. upper_bound { + let (_n_stash, n_controller) = create_stash_controller::(u32::max_value() - i)?; + if i < n { + Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), vec![stash_lookup.clone()])?; + } + } + + ValidatorCount::put(1); + + // Start a new Era + let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); + + assert!(new_validators.len() == 1); + + // Give Era Points + let reward = EraRewardPoints:: { + total: points_total, + individual: points_individual.into_iter().collect(), + }; + + let current_era = CurrentEra::get().unwrap(); + ErasRewardPoints::::insert(current_era, reward); + + // Create reward pool + let total_payout = T::Currency::minimum_balance() * 1000.into(); + >::insert(current_era, total_payout); + + Ok(v_stash) +} + +benchmarks! { + _{ + // User account seed + let u in 0 .. 
1000 => (); + } + + bond { + let u in ...; + let stash = create_funded_user::("stash",u); + let controller = create_funded_user::("controller", u); + let controller_lookup: ::Source = T::Lookup::unlookup(controller); + let reward_destination = RewardDestination::Staked; + let amount = T::Currency::minimum_balance() * 10.into(); + }: _(RawOrigin::Signed(stash), controller_lookup, amount, reward_destination) + + bond_extra { + let u in ...; + let (stash, _) = create_stash_controller::(u)?; + let max_additional = T::Currency::minimum_balance() * 10.into(); + }: _(RawOrigin::Signed(stash), max_additional) + + unbond { + let u in ...; + let (_, controller) = create_stash_controller::(u)?; + let amount = T::Currency::minimum_balance() * 10.into(); + }: _(RawOrigin::Signed(controller), amount) + + // Worst case scenario, everything is removed after the bonding duration + withdraw_unbonded { + let u in ...; + let (stash, controller) = create_stash_controller::(u)?; + let amount = T::Currency::minimum_balance() * 10.into(); + Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; + let current_block = System::::block_number(); + // let unbond_block = current_block + T::BondingDuration::get().into() + 10.into(); + // System::::set_block_number(unbond_block); + }: _(RawOrigin::Signed(controller)) + + validate { + let u in ...; + let (_, controller) = create_stash_controller::(u)?; + let prefs = ValidatorPrefs::default(); + }: _(RawOrigin::Signed(controller), prefs) + + // Worst case scenario, MAX_NOMINATIONS + nominate { + let n in 1 .. MAX_NOMINATIONS as u32; + let (_, controller) = create_stash_controller::(n + 1)?; + let validators = create_validators::(n)?; + }: _(RawOrigin::Signed(controller), validators) + + chill { + let u in ...; + let (_, controller) = create_stash_controller::(u)?; + }: _(RawOrigin::Signed(controller)) + + set_payee { + let u in ...; + let (_, controller) = create_stash_controller::(u)?; + }: _(RawOrigin::Signed(controller), RewardDestination::Controller) + + set_controller { + let u in ...; + let (stash, _) = create_stash_controller::(u)?; + let new_controller = create_funded_user::("new_controller", u); + let new_controller_lookup = T::Lookup::unlookup(new_controller); + }: _(RawOrigin::Signed(stash), new_controller_lookup) + + set_validator_count { + let c in 0 .. 1000; + }: _(RawOrigin::Root, c) + + force_no_eras { let i in 0 .. 1; }: _(RawOrigin::Root) + + force_new_era {let i in 0 .. 1; }: _(RawOrigin::Root) + + force_new_era_always { let i in 0 .. 1; }: _(RawOrigin::Root) + + // Worst case scenario, the list of invulnerables is very long. + set_invulnerables { + let v in 0 .. 1000; + let mut invulnerables = Vec::new(); + for i in 0 .. v { + invulnerables.push(account("invulnerable", i, SEED)); + } + }: _(RawOrigin::Root, invulnerables) + + force_unstake { + let u in ...; + let (stash, _) = create_stash_controller::(u)?; + }: _(RawOrigin::Root, stash) + + cancel_deferred_slash { + let s in 1 .. 1000; + let mut unapplied_slashes = Vec::new(); + let era = EraIndex::one(); + for _ in 0 .. 1000 { + unapplied_slashes.push(UnappliedSlash::>::default()); + } + UnappliedSlashes::::insert(era, &unapplied_slashes); + + let slash_indices: Vec = (0 .. s).collect(); + }: _(RawOrigin::Root, era, slash_indices) + + payout_stakers { + let n in 1 .. 
MAX_NOMINATIONS as u32; + let validator = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; + let current_era = CurrentEra::get().unwrap(); + let caller = account("caller", n, SEED); + }: _(RawOrigin::Signed(caller), validator, current_era) + + rebond { + let l in 1 .. 1000; + let (_, controller) = create_stash_controller::(u)?; + let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); + let unlock_chunk = UnlockChunk::> { + value: 1.into(), + era: EraIndex::zero(), + }; + for _ in 0 .. l { + staking_ledger.unlocking.push(unlock_chunk.clone()) + } + Ledger::::insert(controller.clone(), staking_ledger); + }: _(RawOrigin::Signed(controller), (l + 100).into()) + + set_history_depth { + let e in 1 .. 100; + HistoryDepth::put(e); + CurrentEra::put(e); + for i in 0 .. e { + >::insert(i, T::AccountId::default(), Exposure::>::default()); + >::insert(i, T::AccountId::default(), Exposure::>::default()); + >::insert(i, T::AccountId::default(), ValidatorPrefs::default()); + >::insert(i, BalanceOf::::one()); + >::insert(i, EraRewardPoints::::default()); + >::insert(i, BalanceOf::::one()); + ErasStartSessionIndex::insert(i, i); + } + }: _(RawOrigin::Root, EraIndex::zero()) + + reap_stash { + let u in 1 .. 1000; + let (stash, controller) = create_stash_controller::(u)?; + T::Currency::make_free_balance_be(&stash, 0.into()); + }: _(RawOrigin::Signed(controller), stash) + + new_era { + let v in 1 .. 10; + let n in 1 .. 100; + MinimumValidatorCount::put(0); + create_validators_with_nominators_for_era::(v, n)?; + let session_index = SessionIndex::one(); + }: { + let validators = Staking::::new_era(session_index).ok_or("`new_era` failed")?; + assert!(validators.len() == v as usize); + } + + do_slash { + let l in 1 .. 1000; + let (stash, controller) = create_stash_controller::(0)?; + let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); + let unlock_chunk = UnlockChunk::> { + value: 1.into(), + era: EraIndex::zero(), + }; + for _ in 0 .. l { + staking_ledger.unlocking.push(unlock_chunk.clone()) + } + Ledger::::insert(controller.clone(), staking_ledger.clone()); + let slash_amount = T::Currency::minimum_balance() * 10.into(); + }: { + crate::slashing::do_slash::( + &stash, + slash_amount, + &mut BalanceOf::::zero(), + &mut NegativeImbalanceOf::::zero() + ); + } + + payout_all { + let v in 1 .. 10; + let n in 1 .. 
100; + MinimumValidatorCount::put(0); + create_validators_with_nominators_for_era::(v, n)?; + // Start a new Era + let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); + assert!(new_validators.len() == v as usize); + + let current_era = CurrentEra::get().unwrap(); + let mut points_total = 0; + let mut points_individual = Vec::new(); + let mut payout_calls = Vec::new(); + + for validator in new_validators.iter() { + points_total += 10; + points_individual.push((validator.clone(), 10)); + payout_calls.push(Call::::payout_stakers(validator.clone(), current_era)) + } + + // Give Era Points + let reward = EraRewardPoints:: { + total: points_total, + individual: points_individual.into_iter().collect(), + }; + + ErasRewardPoints::::insert(current_era, reward); + + // Create reward pool + let total_payout = T::Currency::minimum_balance() * 1000.into(); + >::insert(current_era, total_payout); + + let caller: T::AccountId = account("caller", 0, SEED); + }: { + for call in payout_calls { + call.dispatch(RawOrigin::Signed(caller.clone()).into())?; + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::mock::{ExtBuilder, Test, Balances, Staking, Origin}; + use frame_support::assert_ok; + + #[test] + fn create_validators_with_nominators_for_era_works() { + ExtBuilder::default().has_stakers(false).build().execute_with(|| { + let v = 10; + let n = 100; + + create_validators_with_nominators_for_era::(v,n).unwrap(); + + let count_validators = Validators::::iter().count(); + let count_nominators = Nominators::::iter().count(); + + assert_eq!(count_validators, v as usize); + assert_eq!(count_nominators, n as usize); + }); + } + + #[test] + fn create_validator_with_nominators_works() { + ExtBuilder::default().has_stakers(false).build().execute_with(|| { + let n = 10; + + let validator_stash = create_validator_with_nominators::( + n, + MAX_NOMINATIONS as u32, + ).unwrap(); + + let current_era = CurrentEra::get().unwrap(); + + let original_free_balance = Balances::free_balance(&validator_stash); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), validator_stash, current_era)); + let new_free_balance = Balances::free_balance(&validator_stash); + + assert!(original_free_balance < new_free_balance); + }); + } + + #[test] + fn test_payout_all() { + ExtBuilder::default().has_stakers(false).build().execute_with(|| { + let v = 10; + let n = 100; + + let selected_benchmark = SelectedBenchmark::payout_all; + let c = vec![(frame_benchmarking::BenchmarkParameter::v, v), (frame_benchmarking::BenchmarkParameter::n, n)]; + let closure_to_benchmark = + >::instance( + &selected_benchmark, + &c + ).unwrap(); + + assert_ok!(closure_to_benchmark()); + }); + } + + #[test] + fn test_benchmarks() { + ExtBuilder::default().has_stakers(false).build().execute_with(|| { + assert_ok!(test_benchmark_bond::()); + assert_ok!(test_benchmark_bond_extra::()); + assert_ok!(test_benchmark_unbond::()); + assert_ok!(test_benchmark_withdraw_unbonded::()); + assert_ok!(test_benchmark_validate::()); + assert_ok!(test_benchmark_nominate::()); + assert_ok!(test_benchmark_chill::()); + assert_ok!(test_benchmark_set_payee::()); + assert_ok!(test_benchmark_set_controller::()); + assert_ok!(test_benchmark_set_validator_count::()); + assert_ok!(test_benchmark_force_no_eras::()); + assert_ok!(test_benchmark_force_new_era::()); + assert_ok!(test_benchmark_force_new_era_always::()); + assert_ok!(test_benchmark_set_invulnerables::()); + assert_ok!(test_benchmark_force_unstake::()); + 
assert_ok!(test_benchmark_cancel_deferred_slash::()); + assert_ok!(test_benchmark_payout_stakers::()); + assert_ok!(test_benchmark_rebond::()); + assert_ok!(test_benchmark_set_history_depth::()); + assert_ok!(test_benchmark_reap_stash::()); + assert_ok!(test_benchmark_new_era::()); + assert_ok!(test_benchmark_do_slash::()); + assert_ok!(test_benchmark_payout_all::()); + }); + } +} diff --git a/frame/staking/src/inflation.rs b/frame/staking/src/inflation.rs index e75ac3af2bf332cc7f357807da33d386732d12f0..d20741d9bc44d656134bfe9297268f41ea3b52b8 100644 --- a/frame/staking/src/inflation.rs +++ b/frame/staking/src/inflation.rs @@ -19,7 +19,7 @@ //! The staking rate in NPoS is the total amount of tokens staked by nominators and validators, //! divided by the total token supply. -use sp_runtime::{Perbill, PerThing, traits::AtLeast32Bit, curve::PiecewiseLinear}; +use sp_runtime::{Perbill, traits::AtLeast32Bit, curve::PiecewiseLinear}; /// The total payout to all validators (and their nominators) per era. /// diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index d8740f44e71516b711912f63ae9e51a57bd08134..da860bd63e9434f54dcb8008b100ac142ea80049 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -72,8 +72,8 @@ //! There are three possible roles that any staked account pair can be in: `Validator`, `Nominator` //! and `Idle` (defined in [`StakerStatus`](./enum.StakerStatus.html)). There are three //! corresponding instructions to change between roles, namely: -//! [`validate`](./enum.Call.html#variant.validate), [`nominate`](./enum.Call.html#variant.nominate), -//! and [`chill`](./enum.Call.html#variant.chill). +//! [`validate`](./enum.Call.html#variant.validate), +//! [`nominate`](./enum.Call.html#variant.nominate), and [`chill`](./enum.Call.html#variant.chill). //! //! #### Validating //! @@ -104,10 +104,11 @@ //! The **reward and slashing** procedure is the core of the Staking module, attempting to _embrace //! valid behavior_ while _punishing any misbehavior or lack of availability_. //! -//! Reward must be claimed by stakers for each era before it gets too old by $HISTORY_DEPTH using -//! `payout_nominator` and `payout_validator` calls. +//! Rewards must be claimed for each era before it gets too old by `$HISTORY_DEPTH` using the +//! `payout_stakers` call. Any account can call `payout_stakers`, which pays the reward to +//! the validator as well as its nominators. //! Only the [`T::MaxNominatorRewardedPerValidator`] biggest stakers can claim their reward. This -//! limit the i/o cost to compute nominators payout. +//! is to limit the i/o cost to mutate storage for each nominator's account. //! //! Slashing can occur at any point in time, once misbehavior is reported. Once slashing is //! determined, a value is deducted from the balance of the validator and all the nominators who @@ -157,6 +158,7 @@ //! decl_module! { //! pub struct Module for enum Call where origin: T::Origin { //! /// Reward a validator. +//! #[weight = frame_support::weights::SimpleDispatchInfo::default()] //! pub fn reward_myself(origin) -> dispatch::DispatchResult { //! let reported = ensure_signed(origin)?; //! >::reward_by_ids(vec![(reported, 10)]); @@ -183,8 +185,8 @@ //! [`reward_by_indices`](./enum.Call.html#variant.reward_by_indices). //! //! [`Module`](./struct.Module.html) implements -//! [`pallet_authorship::EventHandler`](../pallet_authorship/trait.EventHandler.html) to add reward points -//! to block producer and block producer of referenced uncles. +//! 
[`pallet_authorship::EventHandler`](../pallet_authorship/trait.EventHandler.html) to add reward +//! points to block producer and block producer of referenced uncles. //! //! The validator and its nominator split their reward as following: //! @@ -243,40 +245,55 @@ //! ## Related Modules //! //! - [Balances](../pallet_balances/index.html): Used to manage values at stake. -//! - [Session](../pallet_session/index.html): Used to manage sessions. Also, a list of new validators -//! is stored in the Session module's `Validators` at the end of each era. +//! - [Session](../pallet_session/index.html): Used to manage sessions. Also, a list of new +//! validators is stored in the Session module's `Validators` at the end of each era. -#![recursion_limit="128"] +#![recursion_limit = "128"] #![cfg_attr(not(feature = "std"), no_std)] #[cfg(test)] mod mock; #[cfg(test)] mod tests; -mod slashing; -mod migration; +#[cfg(feature = "testing-utils")] +pub mod testing_utils; +#[cfg(any(feature = "runtime-benchmarks", test))] +pub mod benchmarking; +pub mod slashing; +pub mod offchain_election; pub mod inflation; -use sp_std::{prelude::*, result, collections::btree_map::BTreeMap}; +use sp_std::{ + result, + prelude::*, + collections::btree_map::BTreeMap, + convert::{TryInto, From}, + mem::size_of, +}; use codec::{HasCompact, Encode, Decode}; use frame_support::{ - decl_module, decl_event, decl_storage, ensure, decl_error, - weights::SimpleDispatchInfo, - dispatch::DispatchResult, + decl_module, decl_event, decl_storage, ensure, decl_error, debug, + weights::{SimpleDispatchInfo, Weight}, + storage::IterableStorageMap, + dispatch::{IsSubType, DispatchResult}, traits::{ - Currency, LockIdentifier, LockableCurrency, - WithdrawReasons, OnUnbalanced, Imbalance, Get, Time + Currency, LockIdentifier, LockableCurrency, WithdrawReasons, OnUnbalanced, Imbalance, Get, + UnixTime, EstimateNextNewSession, EnsureOrigin, } }; -use pallet_session::historical::SessionManager; +use pallet_session::historical; use sp_runtime::{ - Perbill, PerThing, RuntimeDebug, + Perbill, PerU16, PerThing, RuntimeDebug, curve::PiecewiseLinear, traits::{ - Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, - AtLeast32Bit, EnsureOrigin, - } + Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, AtLeast32Bit, + Dispatchable, + }, + transaction_validity::{ + TransactionValidityError, TransactionValidity, ValidTransaction, InvalidTransaction, + TransactionSource, TransactionPriority, + }, }; use sp_staking::{ SessionIndex, @@ -284,14 +301,45 @@ use sp_staking::{ }; #[cfg(feature = "std")] use sp_runtime::{Serialize, Deserialize}; -use frame_system::{self as system, ensure_signed, ensure_root}; - -use sp_phragmen::ExtendedBalance; +use frame_system::{ + self as system, ensure_signed, ensure_root, ensure_none, + offchain::SubmitUnsignedTransaction, +}; +use sp_phragmen::{ + ExtendedBalance, Assignment, PhragmenScore, PhragmenResult, build_support_map, evaluate_support, + elect, generate_compact_solution_type, is_score_better, VotingLimit, SupportMap, +}; const DEFAULT_MINIMUM_VALIDATOR_COUNT: u32 = 4; -const MAX_NOMINATIONS: usize = 16; -const MAX_UNLOCKING_CHUNKS: usize = 32; const STAKING_ID: LockIdentifier = *b"staking "; +pub const MAX_UNLOCKING_CHUNKS: usize = 32; +pub const MAX_NOMINATIONS: usize = ::LIMIT; + +// syntactic sugar for logging +#[cfg(feature = "std")] +const LOG_TARGET: &'static str = "staking"; +macro_rules! log { + ($level:tt, $patter:expr $(, $values:expr)* $(,)?) 
=> { + debug::native::$level!( + target: LOG_TARGET, + $patter $(, $values)* + ) + }; +} + +/// Data type used to index nominators in the compact type +pub type NominatorIndex = u32; + +/// Data type used to index validators in the compact type. +pub type ValidatorIndex = u16; + +// Ensure the size of both ValidatorIndex and NominatorIndex. They both need to be well below usize. +static_assertions::const_assert!(size_of::() <= size_of::()); +static_assertions::const_assert!(size_of::() <= size_of::()); + +/// Maximum number of stakers that can be stored in a snapshot. +pub(crate) const MAX_VALIDATORS: usize = ValidatorIndex::max_value() as usize; +pub(crate) const MAX_NOMINATORS: usize = NominatorIndex::max_value() as usize; /// Counter for the number of eras that have passed. pub type EraIndex = u32; @@ -299,18 +347,40 @@ pub type EraIndex = u32; /// Counter for the number of "reward" points earned by a given validator. pub type RewardPoint = u32; +// Note: Maximum nomination limit is set here -- 16. +generate_compact_solution_type!(pub GenericCompactAssignments, 16); + /// Information regarding the active era (era in used in session). #[derive(Encode, Decode, RuntimeDebug)] -pub struct ActiveEraInfo { +pub struct ActiveEraInfo { /// Index of era. index: EraIndex, - /// Moment of start + /// Moment of start expresed as millisecond from `$UNIX_EPOCH`. /// /// Start can be none if start hasn't been set for the era yet, /// Start is set on the first on_finalize of the era to guarantee usage of `Time`. - start: Option, + start: Option, } +/// Accuracy used for on-chain phragmen. +pub type ChainAccuracy = Perbill; + +/// Accuracy used for off-chain phragmen. This better be small. +pub type OffchainAccuracy = PerU16; + +/// The balance type of this module. +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; + +/// The compact type for election solutions. +pub type CompactAssignments = + GenericCompactAssignments; + +type PositiveImbalanceOf = + <::Currency as Currency<::AccountId>>::PositiveImbalance; +type NegativeImbalanceOf = + <::Currency as Currency<::AccountId>>::NegativeImbalance; + /// Reward points of an era. Used to split era total payout between validators. /// /// This points will be used to reward validators and their respective nominators. @@ -395,8 +465,9 @@ pub struct StakingLedger { /// Any balance that is becoming free, which may eventually be transferred out /// of the stash (assuming it doesn't get slashed first). pub unlocking: Vec>, - /// The latest and highest era which the staker has claimed reward for. - pub last_reward: Option, + /// List of eras for which the stakers behind a validator have claimed rewards. Only updated + /// for validators. + pub claimed_rewards: Vec, } impl< @@ -421,7 +492,7 @@ impl< total, active: self.active, unlocking, - last_reward: self.last_reward + claimed_rewards: self.claimed_rewards } } @@ -524,10 +595,10 @@ pub struct Nominations { #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug)] pub struct IndividualExposure { /// The stash account of the nominator in question. - who: AccountId, + pub who: AccountId, /// Amount of funds exposed. #[codec(compact)] - value: Balance, + pub value: Balance, } /// A snapshot of the stake backing a single validator in the system. 
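// Illustrative aside on the new `claimed_rewards` field above, which replaces the old
// `last_reward` marker: it records, per validator ledger, every era whose payout has
// already been taken, so a repeated `payout_stakers` call for the same era can be refused
// with `AlreadyClaimed`. A minimal sketch of that check-and-record pattern, assuming the
// vector is kept sorted and pruned to the history window (the helper name is made up;
// only the field and the error come from this patch):
//
//     fn note_claim(claimed_rewards: &mut Vec<u32>, era: u32) -> Result<(), &'static str> {
//         match claimed_rewards.binary_search(&era) {
//             // Already present: this validator's reward for `era` was paid out before.
//             Ok(_) => Err("AlreadyClaimed"),
//             // Not claimed yet: record it while keeping the vector sorted.
//             Err(pos) => {
//                 claimed_rewards.insert(pos, era);
//                 Ok(())
//             }
//         }
//     }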
@@ -559,13 +630,61 @@ pub struct UnappliedSlash { payout: Balance, } -pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = - <::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; -type MomentOf = <::Time as Time>::Moment; +/// Indicate how an election round was computed. +#[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] +pub enum ElectionCompute { + /// Result was forcefully computed on chain at the end of the session. + OnChain, + /// Result was submitted and accepted to the chain via a signed transaction. + Signed, + /// Result was submitted and accepted to the chain via an unsigned transaction (by an + /// authority). + Unsigned, +} + +/// The result of an election round. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +pub struct ElectionResult { + /// Flat list of validators who have been elected. + elected_stashes: Vec, + /// Flat list of new exposures, to be updated in the [`Exposure`] storage. + exposures: Vec<(AccountId, Exposure)>, + /// Type of the result. This is kept on chain only to track and report the best score's + /// submission type. An optimisation could remove this. + compute: ElectionCompute, +} + +/// The status of the upcoming (offchain) election. +#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] +pub enum ElectionStatus { + /// Nothing has and will happen for now. submission window is not open. + Closed, + /// The submission window has been open since the contained block number. + Open(BlockNumber), +} + +impl ElectionStatus { + fn is_open_at(&self, n: BlockNumber) -> bool { + *self == Self::Open(n) + } + + fn is_closed(&self) -> bool { + match self { + Self::Closed => true, + _ => false + } + } + + fn is_open(&self) -> bool { + !self.is_closed() + } +} + +impl Default for ElectionStatus { + fn default() -> Self { + Self::Closed + } +} /// Means for interacting with a specialized version of the `session` trait. /// @@ -591,7 +710,8 @@ impl SessionInterface<::AccountId> for T whe >, T::SessionHandler: pallet_session::SessionHandler<::AccountId>, T::SessionManager: pallet_session::SessionManager<::AccountId>, - T::ValidatorIdOf: Convert<::AccountId, Option<::AccountId>> + T::ValidatorIdOf: + Convert<::AccountId, Option<::AccountId>>, { fn disable_validator(validator: &::AccountId) -> Result { >::disable(validator) @@ -614,7 +734,7 @@ pub trait Trait: frame_system::Trait { /// /// It is guaranteed to start being called from the first `on_finalize`. Thus value at genesis /// is not used. - type Time: Time; + type UnixTime: UnixTime; /// Convert a balance into a number used for election calculation. /// This must fit into a `u64` but is allowed to be sensibly lossy. @@ -655,11 +775,31 @@ pub trait Trait: frame_system::Trait { /// The NPoS reward curve to use. type RewardCurve: Get<&'static PiecewiseLinear<'static>>; - /// The maximum number of nominator rewarded for each validator. + /// Something that can estimate the next session change, accurately or as a best effort guess. + type NextNewSession: EstimateNextNewSession; + + /// How many blocks ahead of the era, within the last do we try to run the phragmen offchain? + /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will + /// be used. + type ElectionLookahead: Get; + + /// The overarching call type. + type Call: Dispatchable + From> + IsSubType, Self> + Clone; + + /// A transaction submitter. 
+ type SubmitTransaction: SubmitUnsignedTransaction::Call>; + + /// The maximum number of nominators rewarded for each validator. /// /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim /// their reward. This used to limit the i/o cost for the nominator payout. type MaxNominatorRewardedPerValidator: Get; + + /// A configuration for base priority of unsigned transactions. + /// + /// This is exposed so that it can be tuned for particular runtime, when + /// multiple pallets send unsigned transactions. + type UnsignedPriority: Get; } /// Mode of era-forcing. @@ -685,25 +825,26 @@ impl Default for Forcing { // storage migration logic. This should match directly with the semantic versions of the Rust crate. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] enum Releases { - V1_0_0, + V1_0_0Ancient, V2_0_0, + V3_0_0, } impl Default for Releases { fn default() -> Self { - Releases::V1_0_0 + Releases::V3_0_0 } } decl_storage! { trait Store for Module as Staking { - /// Number of era to keep in history. + /// Number of eras to keep in history. /// - /// Information is kept for eras in `[current_era - history_depth; current_era] + /// Information is kept for eras in `[current_era - history_depth; current_era]`. /// - /// Must be more than the number of era delayed by session otherwise. - /// i.e. active era must always be in history. - /// i.e. `active_era > current_era - history_depth` must be guaranteed. + /// Must be more than the number of eras delayed by session otherwise. + /// I.e. active era must always be in history. + /// I.e. `active_era > current_era - history_depth` must be guaranteed. HistoryDepth get(fn history_depth) config(): u32 = 84; /// The ideal number of staking participants. @@ -719,27 +860,27 @@ decl_storage! { pub Invulnerables get(fn invulnerables) config(): Vec; /// Map from all locked "stash" accounts to the controller account. - pub Bonded get(fn bonded): map hasher(blake2_256) T::AccountId => Option; + pub Bonded get(fn bonded): map hasher(twox_64_concat) T::AccountId => Option; /// Map from all (unlocked) "controller" accounts to the info regarding the staking. pub Ledger get(fn ledger): - map hasher(blake2_256) T::AccountId + map hasher(blake2_128_concat) T::AccountId => Option>>; /// Where the reward payment should be made. Keyed by stash. - pub Payee get(fn payee): map hasher(blake2_256) T::AccountId => RewardDestination; + pub Payee get(fn payee): map hasher(twox_64_concat) T::AccountId => RewardDestination; /// The map from (wannabe) validator stash key to the preferences of that validator. pub Validators get(fn validators): - linked_map hasher(blake2_256) T::AccountId => ValidatorPrefs; + map hasher(twox_64_concat) T::AccountId => ValidatorPrefs; /// The map from nominator stash key to the set of stash keys of all validators to nominate. pub Nominators get(fn nominators): - linked_map hasher(blake2_256) T::AccountId => Option>; + map hasher(twox_64_concat) T::AccountId => Option>; /// The current era index. /// - /// This is the latest planned era, depending on how session module queues the validator + /// This is the latest planned era, depending on how the Session pallet queues the validator /// set, it might be active or not. pub CurrentEra get(fn current_era): Option; @@ -747,11 +888,11 @@ decl_storage! { /// /// The active era is the era currently rewarded. /// Validator set of this era must be equal to `SessionInterface::validators`. 
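// Worked example for the `HistoryDepth`, `CurrentEra` and `ActiveEra` documentation above
// (the numbers are illustrative; only the default of 84 comes from this patch):
//
//     let history_depth = 84u32;   // configured default
//     let current_era   = 100u32;  // latest planned era
//     let active_era    = 99u32;   // era currently being rewarded
//     let oldest_kept   = current_era - history_depth; // == 16
//     // Era data is retained for the window [16, 100]; era 15 and older may be pruned.
//     // The documented invariant `active_era > current_era - history_depth` simply
//     // requires the rewarded era to still lie inside that window:
//     assert!(active_era > oldest_kept);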
- pub ActiveEra get(fn active_era): Option>>; + pub ActiveEra get(fn active_era): Option; - /// The session index at which the era start for the last `HISTORY_DEPTH` eras + /// The session index at which the era start for the last `HISTORY_DEPTH` eras. pub ErasStartSessionIndex get(fn eras_start_session_index): - map hasher(blake2_256) EraIndex => Option; + map hasher(twox_64_concat) EraIndex => Option; /// Exposure of validator at era. /// @@ -765,7 +906,7 @@ decl_storage! { /// Clipped Exposure of validator at era. /// - /// This is similar to [`ErasStakers`] but number of nominators exposed is reduce to the + /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the /// `T::MaxNominatorRewardedPerValidator` biggest stakers. /// (Note: the field `total` and `own` of the exposure remains unchanged). /// This is used to limit the i/o cost for the nominator payout. @@ -778,9 +919,9 @@ decl_storage! { double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId => Exposure>; - /// Similarly to `ErasStakers` this holds the preferences of validators. + /// Similar to `ErasStakers`, this holds the preferences of validators. /// - /// This is keyed fist by the era index to allow bulk deletion and then the stash account. + /// This is keyed first by the era index to allow bulk deletion and then the stash account. /// /// Is it removed after `HISTORY_DEPTH` eras. // If prefs hasn't been set or has been removed then 0 commission is returned. @@ -792,19 +933,19 @@ decl_storage! { /// /// Eras that haven't finished yet or has been removed doesn't have reward. pub ErasValidatorReward get(fn eras_validator_reward): - map hasher(blake2_256) EraIndex => Option>; + map hasher(twox_64_concat) EraIndex => Option>; /// Rewards for the last `HISTORY_DEPTH` eras. /// If reward hasn't been set or has been removed then 0 reward is returned. pub ErasRewardPoints get(fn eras_reward_points): - map hasher(blake2_256) EraIndex => EraRewardPoints; + map hasher(twox_64_concat) EraIndex => EraRewardPoints; /// The total amount staked for the last `HISTORY_DEPTH` eras. /// If total hasn't been set or has been removed then 0 stake is returned. pub ErasTotalStake get(fn eras_total_stake): - map hasher(blake2_256) EraIndex => BalanceOf; + map hasher(twox_64_concat) EraIndex => BalanceOf; - /// True if the next session change will be a new era regardless of index. + /// Mode of era forcing. pub ForceEra get(fn force_era) config(): Forcing; /// The percentage of the slash that is distributed to reporters. @@ -818,7 +959,7 @@ decl_storage! { /// All unapplied slashes that are queued for later. pub UnappliedSlashes: - map hasher(blake2_256) EraIndex => Vec>>; + map hasher(twox_64_concat) EraIndex => Vec>>; /// A mapping from still-bonded eras to the first session index of that era. /// @@ -829,30 +970,58 @@ decl_storage! { /// All slashing events on validators, mapped by era to the highest slash proportion /// and slash value of the era. ValidatorSlashInEra: - double_map hasher(blake2_256) EraIndex, hasher(twox_128) T::AccountId + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId => Option<(Perbill, BalanceOf)>; /// All slashing events on nominators, mapped by era to the highest slash value of the era. NominatorSlashInEra: - double_map hasher(blake2_256) EraIndex, hasher(twox_128) T::AccountId + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId => Option>; /// Slashing spans for stash accounts. 
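// Sketch of the clipping rule described for `ErasStakersClipped` above: keep only the
// `T::MaxNominatorRewardedPerValidator` largest individual exposures, leaving `total`
// and `own` untouched. A hand-rolled illustration over the pallet's `IndividualExposure`
// with `u64` account ids and balances, not the pallet's actual code path:
//
//     fn clip(mut others: Vec<IndividualExposure<u64, u64>>, max: usize)
//         -> Vec<IndividualExposure<u64, u64>>
//     {
//         others.sort_by(|a, b| b.value.cmp(&a.value)); // biggest stakers first
//         others.truncate(max);                         // only these are rewarded on payout
//         others
//     }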
- SlashingSpans: map hasher(blake2_256) T::AccountId => Option; + SlashingSpans: map hasher(twox_64_concat) T::AccountId => Option; /// Records information about the maximum slash of a stash within a slashing span, /// as well as how much reward has been paid out. SpanSlash: - map hasher(blake2_256) (T::AccountId, slashing::SpanIndex) + map hasher(twox_64_concat) (T::AccountId, slashing::SpanIndex) => slashing::SpanRecord>; /// The earliest era for which we have a pending, unapplied slash. EarliestUnappliedSlash: Option; + /// Snapshot of validators at the beginning of the current election window. This should only + /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. + pub SnapshotValidators get(fn snapshot_validators): Option>; + + /// Snapshot of nominators at the beginning of the current election window. This should only + /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. + pub SnapshotNominators get(fn snapshot_nominators): Option>; + + /// The next validator set. At the end of an era, if this is available (potentially from the + /// result of an offchain worker), it is immediately used. Otherwise, the on-chain election + /// is executed. + pub QueuedElected get(fn queued_elected): Option>>; + + /// The score of the current [`QueuedElected`]. + pub QueuedScore get(fn queued_score): Option; + + /// Flag to control the execution of the offchain election. When `Open(_)`, we accept + /// solutions to be submitted. + pub EraElectionStatus get(fn era_election_status): ElectionStatus; + + /// True if the current **planned** session is final. Note that this does not take era + /// forcing into account. + pub IsCurrentSessionFinal get(fn is_current_session_final): bool = false; + + /// True if network has been upgraded to this version. /// Storage version of the pallet. /// - /// This is set to v2.0.0 for new networks. - StorageVersion build(|_: &GenesisConfig| Releases::V2_0_0): Releases; + /// This is set to v3.0.0 for new networks. + StorageVersion build(|_: &GenesisConfig| Releases::V3_0_0): Releases; + + /// The era where we migrated from Lazy Payouts to Simple Payouts + MigrateEra: Option; } add_extra_genesis { config(stakers): @@ -890,13 +1059,15 @@ decl_storage! { decl_event!( pub enum Event where Balance = BalanceOf, ::AccountId { - /// The staker has been rewarded by this amount. AccountId is controller account. + /// The staker has been rewarded by this amount. `AccountId` is the stash account. Reward(AccountId, Balance), /// One validator (and its nominators) has been slashed by the given amount. Slash(AccountId, Balance), /// An old slashing report from a prior era was discarded because it could /// not be processed. OldSlashingReportDiscarded(SessionIndex), + /// A new set of stakers was elected with the given computation method. + StakingElection(ElectionCompute), /// An account has bonded this amount. /// /// NOTE: This event is only emitted when funds are bonded via a dispatchable. Notably, @@ -941,6 +1112,37 @@ decl_error! { InvalidNumberOfNominations, /// Items are not sorted and unique. NotSortedAndUnique, + /// Rewards for this era have already been claimed for this validator. + AlreadyClaimed, + /// The submitted result is received out of the open window. + PhragmenEarlySubmission, + /// The submitted result is not as good as the one stored on chain. + PhragmenWeakSubmission, + /// The snapshot data of the current window is missing. + SnapshotUnavailable, + /// Incorrect number of winners were presented. 
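// The `PhragmenWeakSubmission` error above is returned when a solution's claimed score
// fails to beat the currently queued one (`QueuedScore`); the authoritative rule is
// `sp_phragmen::is_score_better`, imported earlier in this file. A rough sketch of the
// comparison, assuming the conventional three-part score of [minimal winner backing,
// total backing, sum of squared backings], where the first two are maximised and the
// last is minimised:
//
//     fn beats_queued(new: [u128; 3], queued: [u128; 3]) -> bool {
//         if new[0] != queued[0] { return new[0] > queued[0]; } // higher minimal backing
//         if new[1] != queued[1] { return new[1] > queued[1]; } // then higher total backing
//         new[2] < queued[2]                                    // then lower squared sum
//     }
//
// This losing case is exactly what the fuzz target's `WeakerSubmission` mode exercises.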
+ PhragmenBogusWinnerCount, + /// One of the submitted winners is not an active candidate on chain (index is out of range + /// in snapshot). + PhragmenBogusWinner, + /// Error while building the assignment type from the compact. This can happen if an index + /// is invalid, or if the weights _overflow_. + PhragmenBogusCompact, + /// One of the submitted nominators is not an active nominator on chain. + PhragmenBogusNominator, + /// One of the submitted nominators has an edge to which they have not voted on chain. + PhragmenBogusNomination, + /// One of the submitted nominators has an edge which is submitted before the last non-zero + /// slash of the target. + PhragmenSlashedNomination, + /// A self vote must only be originated from a validator to ONLY themselves. + PhragmenBogusSelfVote, + /// The submitted result has unknown edges that are not among the presented winners. + PhragmenBogusEdge, + /// The claimed score does not match with the one computed from the data. + PhragmenBogusScore, + /// The call is not allowed at the given time due to restrictions of election period. + CallNotAllowed, } } @@ -956,20 +1158,82 @@ decl_module! { fn deposit_event() = default; - fn on_runtime_upgrade() { - migration::on_runtime_upgrade::(); + /// sets `ElectionStatus` to `Open(now)` where `now` is the block number at which the + /// election window has opened, if we are at the last session and less blocks than + /// `T::ElectionLookahead` is remaining until the next new session schedule. The offchain + /// worker, if applicable, will execute at the end of the current block, and solutions may + /// be submitted. + fn on_initialize(now: T::BlockNumber) -> Weight { + if + // if we don't have any ongoing offchain compute. + Self::era_election_status().is_closed() && + // either current session final based on the plan, or we're forcing. + (Self::is_current_session_final() || Self::will_era_be_forced()) + { + if let Some(next_session_change) = T::NextNewSession::estimate_next_new_session(now){ + if let Some(remaining) = next_session_change.checked_sub(&now) { + if remaining <= T::ElectionLookahead::get() && !remaining.is_zero() { + // create snapshot. + if Self::create_stakers_snapshot() { + // Set the flag to make sure we don't waste any compute here in the same era + // after we have triggered the offline compute. + >::put( + ElectionStatus::::Open(now) + ); + log!(info, "💸 Election window is Open({:?}). Snapshot created", now); + } else { + log!(warn, "💸 Failed to create snapshot at {:?}.", now); + } + + } + } + } else { + log!(warn, "💸 Estimating next session change failed."); + } + } + + // weight + 50_000 + } + + /// Check if the current block number is the one at which the election window has been set + /// to open. If so, it runs the offchain worker code. + fn offchain_worker(now: T::BlockNumber) { + use offchain_election::{set_check_offchain_execution_status, compute_offchain_election}; + + if Self::era_election_status().is_open_at(now) { + let offchain_status = set_check_offchain_execution_status::(now); + if let Err(why) = offchain_status { + log!(debug, "skipping offchain worker in open election window due to [{}]", why); + } else { + if let Err(e) = compute_offchain_election::() { + log!(warn, "💸 Error in phragmen offchain worker: {:?}", e); + } else { + log!(debug, "Executed offchain worker thread without errors."); + } + } + } } fn on_finalize() { // Set the start of the first era. 
if let Some(mut active_era) = Self::active_era() { if active_era.start.is_none() { - active_era.start = Some(T::Time::now()); - >::put(active_era); + let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); + active_era.start = Some(now_as_millis_u64); + ActiveEra::put(active_era); } } } + fn on_runtime_upgrade() -> Weight { + // For Kusama the type hasn't actually changed as Moment was u64 and was the number of + // millisecond since unix epoch. + StorageVersion::put(Releases::V3_0_0); + Self::migrate_last_reward_to_claimed_rewards(); + 0 + } + /// Take the origin account as a stash and lock up `value` of its balance. `controller` will /// be the account that controls it. /// @@ -984,14 +1248,14 @@ decl_module! { /// - O(1). /// - Three extra DB entries. /// - /// NOTE: Two of the storage writes (`Self::bonded`, `Self::payee`) are _never_ cleaned unless - /// the `origin` falls below _existential deposit_ and gets removed as dust. + /// NOTE: Two of the storage writes (`Self::bonded`, `Self::payee`) are _never_ cleaned + /// unless the `origin` falls below _existential deposit_ and gets removed as dust. /// # #[weight = SimpleDispatchInfo::FixedNormal(500_000)] - fn bond(origin, + pub fn bond(origin, controller: ::Source, #[compact] value: BalanceOf, - payee: RewardDestination + payee: RewardDestination, ) { let stash = ensure_signed(origin)?; @@ -1017,6 +1281,10 @@ decl_module! { system::Module::::inc_ref(&stash); + let current_era = CurrentEra::get().unwrap_or(0); + let history_depth = Self::history_depth(); + let last_reward_era = current_era.saturating_sub(history_depth); + let stash_balance = T::Currency::free_balance(&stash); let value = value.min(stash_balance); Self::deposit_event(RawEvent::Bonded(stash.clone(), value)); @@ -1025,7 +1293,7 @@ decl_module! { total: value, active: value, unlocking: vec![], - last_reward: Self::current_era(), + claimed_rewards: (last_reward_era..current_era).collect(), }; Self::update_ledger(&controller, &item); } @@ -1037,7 +1305,8 @@ decl_module! { /// Unlike [`bond`] or [`unbond`] this function does not impose any limitation on the amount /// that can be added. /// - /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. + /// The dispatch origin for this call must be _Signed_ by the stash, not the controller and + /// it can be only called when [`EraElectionStatus`] is `Closed`. /// /// Emits `Bonded`. /// @@ -1048,6 +1317,7 @@ decl_module! { /// # #[weight = SimpleDispatchInfo::FixedNormal(500_000)] fn bond_extra(origin, #[compact] max_additional: BalanceOf) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let stash = ensure_signed(origin)?; let controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; @@ -1076,6 +1346,7 @@ decl_module! { /// to be called first to remove some of the chunks (if possible). /// /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// And, it can be only called when [`EraElectionStatus`] is `Closed`. /// /// Emits `Unbonded`. /// @@ -1092,6 +1363,7 @@ decl_module! { /// #[weight = SimpleDispatchInfo::FixedNormal(400_000)] fn unbond(origin, #[compact] value: BalanceOf) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; ensure!( @@ -1124,6 +1396,7 @@ decl_module! { /// whatever it wants. 
/// /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// And, it can be only called when [`EraElectionStatus`] is `Closed`. /// /// Emits `Withdrawn`. /// @@ -1138,6 +1411,7 @@ decl_module! { /// # #[weight = SimpleDispatchInfo::FixedNormal(400_000)] fn withdraw_unbonded(origin) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let (stash, old_total) = (ledger.stash.clone(), ledger.total); @@ -1171,6 +1445,7 @@ decl_module! { /// Effects will be felt at the beginning of the next era. /// /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// And, it can be only called when [`EraElectionStatus`] is `Closed`. /// /// # /// - Independent of the arguments. Insignificant complexity. @@ -1178,7 +1453,8 @@ decl_module! { /// - Writes are limited to the `origin` account key. /// # #[weight = SimpleDispatchInfo::FixedNormal(750_000)] - fn validate(origin, prefs: ValidatorPrefs) { + pub fn validate(origin, prefs: ValidatorPrefs) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; @@ -1188,23 +1464,26 @@ decl_module! { /// Declare the desire to nominate `targets` for the origin controller. /// - /// Effects will be felt at the beginning of the next era. + /// Effects will be felt at the beginning of the next era. This can only be called when + /// [`EraElectionStatus`] is `Closed`. /// /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// And, it can be only called when [`EraElectionStatus`] is `Closed`. /// /// # /// - The transaction's complexity is proportional to the size of `targets`, - /// which is capped at `MAX_NOMINATIONS`. + /// which is capped at CompactAssignments::LIMIT. /// - Both the reads and writes follow a similar pattern. /// # #[weight = SimpleDispatchInfo::FixedNormal(750_000)] - fn nominate(origin, targets: Vec<::Source>) { + pub fn nominate(origin, targets: Vec<::Source>) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; ensure!(!targets.is_empty(), Error::::EmptyTargets); let targets = targets.into_iter() - .take(MAX_NOMINATIONS) + .take(::LIMIT) .map(|t| T::Lookup::lookup(t)) .collect::, _>>()?; @@ -1224,6 +1503,7 @@ decl_module! { /// Effects will be felt at the beginning of the next era. /// /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// And, it can be only called when [`EraElectionStatus`] is `Closed`. /// /// # /// - Independent of the arguments. Insignificant complexity. @@ -1232,6 +1512,7 @@ decl_module! { /// # #[weight = SimpleDispatchInfo::FixedNormal(500_000)] fn chill(origin) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; Self::chill_stash(&ledger.stash); @@ -1290,8 +1571,6 @@ decl_module! { ValidatorCount::put(new); } - // ----- Root calls. - /// Force there to be no new eras indefinitely. /// /// # @@ -1359,7 +1638,7 @@ decl_module! 
{ .or_else(ensure_root)?; ensure!(!slash_indices.is_empty(), Error::::EmptyTargets); - ensure!(Self::is_sorted_and_unique(&slash_indices), Error::::NotSortedAndUnique); + ensure!(is_sorted_and_unique(&slash_indices), Error::::NotSortedAndUnique); let mut unapplied = ::UnappliedSlashes::get(&era); let last_item = slash_indices[slash_indices.len() - 1]; @@ -1373,6 +1652,10 @@ decl_module! { ::UnappliedSlashes::insert(&era, &unapplied); } + /// **This extrinsic will be removed after `MigrationEra + HistoryDepth` has passed, giving + /// opportunity for users to claim all rewards before moving to Simple Payouts. After this + /// time, you should use `payout_stakers` instead.** + /// /// Make one nominator's payout for one era. /// /// - `who` is the controller account of the nominator to pay out. @@ -1403,10 +1686,14 @@ decl_module! { fn payout_nominator(origin, era: EraIndex, validators: Vec<(T::AccountId, u32)>) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::do_payout_nominator(who, era, validators) + let ctrl = ensure_signed(origin)?; + Self::do_payout_nominator(ctrl, era, validators) } + /// **This extrinsic will be removed after `MigrationEra + HistoryDepth` has passed, giving + /// opportunity for users to claim all rewards before moving to Simple Payouts. After this + /// time, you should use `payout_stakers` instead.** + /// /// Make one validator's payout for one era. /// /// - `who` is the controller account of the validator to pay out. @@ -1424,24 +1711,47 @@ decl_module! { /// # #[weight = SimpleDispatchInfo::FixedNormal(500_000)] fn payout_validator(origin, era: EraIndex) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::do_payout_validator(who, era) + let ctrl = ensure_signed(origin)?; + Self::do_payout_validator(ctrl, era) + } + + /// Pay out all the stakers behind a single validator for a single era. + /// + /// - `validator_stash` is the stash account of the validator. Their nominators, up to + /// `T::MaxNominatorRewardedPerValidator`, will also receive their rewards. + /// - `era` may be any era between `[current_era - history_depth; current_era]`. + /// + /// The origin of this call must be _Signed_. Any account can call this function, even if + /// it is not one of the stakers. + /// + /// This can only be called when [`EraElectionStatus`] is `Closed`. + /// + /// # + /// - Time complexity: at most O(MaxNominatorRewardedPerValidator). + /// - Contains a limited number of reads and writes. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000)] + fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + ensure_signed(origin)?; + Self::do_payout_stakers(validator_stash, era) } /// Rebond a portion of the stash scheduled to be unlocked. /// + /// The dispatch origin must be signed by the controller, and it can be only called when + /// [`EraElectionStatus`] is `Closed`. + /// /// # /// - Time complexity: O(1). Bounded by `MAX_UNLOCKING_CHUNKS`. /// - Storage changes: Can't increase storage, only decrease it. 
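The era window mentioned above, `[current_era - history_depth; current_era]`, amounts to a simple range check; a minimal sketch with plain integers, using illustrative values rather than any runtime's actual configuration:

```rust
/// An era can be paid out only if it is still inside the history window,
/// i.e. in `[current_era - history_depth, current_era]`.
fn is_payable_era(era: u32, current_era: u32, history_depth: u32) -> bool {
    era <= current_era && era >= current_era.saturating_sub(history_depth)
}

fn main() {
    assert!(is_payable_era(20, 100, 84));   // inside the window
    assert!(!is_payable_era(10, 100, 84));  // already pruned from history
    assert!(!is_payable_era(101, 100, 84)); // era has not happened yet
}
```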
/// # #[weight = SimpleDispatchInfo::FixedNormal(500_000)] fn rebond(origin, #[compact] value: BalanceOf) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - ensure!( - !ledger.unlocking.is_empty(), - Error::::NoUnlockChunk, - ); + ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); let ledger = ledger.rebond(value); Self::update_ledger(&controller, &ledger); @@ -1472,30 +1782,224 @@ decl_module! { /// This can be called from any origin. /// /// - `stash`: The stash account to reap. Its balance must be zero. + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn reap_stash(_origin, stash: T::AccountId) { ensure!(T::Currency::total_balance(&stash).is_zero(), Error::::FundedTarget); Self::kill_stash(&stash)?; T::Currency::remove_lock(STAKING_ID, &stash); } + + /// Submit a phragmen result to the chain. If the solution: + /// + /// 1. is valid. + /// 2. has a better score than a potentially existing solution on chain. + /// + /// then, it will be _put_ on chain. + /// + /// A solution consists of two pieces of data: + /// + /// 1. `winners`: a flat vector of all the winners of the round. + /// 2. `assignments`: the compact version of an assignment vector that encodes the edge + /// weights. + /// + /// Both of which may be computed using [`phragmen`], or any other algorithm. + /// + /// Additionally, the submitter must provide: + /// + /// - The `score` that they claim their solution has. + /// + /// Both validators and nominators will be represented by indices in the solution. The + /// indices should respect the corresponding types ([`ValidatorIndex`] and + /// [`NominatorIndex`]). Moreover, they should be valid when used to index into + /// [`SnapshotValidators`] and [`SnapshotNominators`]. Any invalid index will cause the + /// solution to be rejected. These two storage items are set during the election window and + /// may be used to determine the indices. + /// + /// A solution is valid if: + /// + /// 0. It is submitted when [`EraElectionStatus`] is `Open`. + /// 1. Its claimed score is equal to the score computed on-chain. + /// 2. Presents the correct number of winners. + /// 3. All indexes must be value according to the snapshot vectors. All edge values must + /// also be correct and should not overflow the granularity of the ratio type (i.e. 256 + /// or billion). + /// 4. For each edge, all targets are actually nominated by the voter. + /// 5. Has correct self-votes. + /// + /// A solutions score is consisted of 3 parameters: + /// + /// 1. `min { support.total }` for each support of a winner. This value should be maximized. + /// 2. `sum { support.total }` for each support of a winner. This value should be minimized. + /// 3. `sum { support.total^2 }` for each support of a winner. This value should be + /// minimized (to ensure less variance) + /// + /// # + /// E: number of edges. m: size of winner committee. n: number of nominators. d: edge degree + /// (16 for now) v: number of on-chain validator candidates. + /// + /// NOTE: given a solution which is reduced, we can enable a new check the ensure `|E| < n + + /// m`. We don't do this _yet_, but our offchain worker code executes it nonetheless. + /// + /// major steps (all done in `check_and_replace_solution`): + /// + /// - Storage: O(1) read `ElectionStatus`. + /// - Storage: O(1) read `PhragmenScore`. + /// - Storage: O(1) read `ValidatorCount`. 
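As a concrete illustration of the three-parameter score described above, the sketch below evaluates support totals and compares two scores; the comparison mirrors what a helper like `is_score_better` is expected to do, but it does not reproduce `sp-phragmen`'s exact tie-breaking rules:

```rust
/// Compute the [min, sum, sum-of-squares] score of a solution from the total
/// backing of each elected winner.
fn evaluate(support_totals: &[u128]) -> [u128; 3] {
    let min = support_totals.iter().copied().min().unwrap_or(0);
    let sum: u128 = support_totals.iter().sum();
    let sum_squared: u128 = support_totals.iter().map(|x| x * x).sum();
    [min, sum, sum_squared]
}

/// Lexicographic comparison: a higher minimal backing wins, then a lower total
/// stake, then a lower sum of squares (less variance).
fn is_better(new: [u128; 3], old: [u128; 3]) -> bool {
    if new[0] != old[0] { return new[0] > old[0]; }
    if new[1] != old[1] { return new[1] < old[1]; }
    new[2] < old[2]
}

fn main() {
    // Two winners each backed by 100 is a more balanced, and therefore better,
    // outcome than one backed by 150 and one by 50, even though the sums match.
    let balanced = evaluate(&[100, 100]);
    let skewed = evaluate(&[150, 50]);
    assert!(is_better(balanced, skewed));
}
```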
+ /// - Storage: O(1) length read from `SnapshotValidators`. + /// + /// - Storage: O(v) reads of `AccountId` to fetch `snapshot_validators`. + /// - Memory: O(m) iterations to map winner index to validator id. + /// - Storage: O(n) reads `AccountId` to fetch `snapshot_nominators`. + /// - Memory: O(n + m) reads to map index to `AccountId` for un-compact. + /// + /// - Storage: O(e) accountid reads from `Nomination` to read correct nominations. + /// - Storage: O(e) calls into `slashable_balance_of_extended` to convert ratio to staked. + /// + /// - Memory: build_support_map. O(e). + /// - Memory: evaluate_support: O(E). + /// + /// - Storage: O(e) writes to `QueuedElected`. + /// - Storage: O(1) write to `QueuedScore` + /// + /// The weight of this call is 1/10th of the blocks total weight. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + pub fn submit_election_solution( + origin, + winners: Vec, + compact_assignments: CompactAssignments, + score: PhragmenScore, + era: EraIndex, + ) { + let _who = ensure_signed(origin)?; + Self::check_and_replace_solution( + winners, + compact_assignments, + ElectionCompute::Signed, + score, + era, + )? + } + + /// Unsigned version of `submit_election_solution`. + /// + /// Note that this must pass the [`ValidateUnsigned`] check which only allows transactions + /// from the local node to be included. In other words, only the block author can include a + /// transaction in the block. + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + pub fn submit_election_solution_unsigned( + origin, + winners: Vec, + compact_assignments: CompactAssignments, + score: PhragmenScore, + era: EraIndex, + ) { + ensure_none(origin)?; + Self::check_and_replace_solution( + winners, + compact_assignments, + ElectionCompute::Unsigned, + score, + era, + )? + // TODO: instead of returning an error, panic. This makes the entire produced block + // invalid. + // This ensures that block authors will not ever try and submit a solution which is not + // an improvement, since they will lose their authoring points/rewards. + } } } impl Module { - // PUBLIC IMMUTABLES + /// Migrate `last_reward` to `claimed_rewards` + pub fn migrate_last_reward_to_claimed_rewards() { + use frame_support::migration::{StorageIterator, put_storage_value}; + // Migrate from `last_reward` to `claimed_rewards`. + // We will construct a vector from `current_era - history_depth` to `last_reward` + // for each validator and nominator. + // + // Old Staking Ledger + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] + struct OldStakingLedger { + pub stash: AccountId, + #[codec(compact)] + pub total: Balance, + #[codec(compact)] + pub active: Balance, + pub unlocking: Vec>, + pub last_reward: Option, + } + // Current era and history depth + let current_era = Self::current_era().unwrap_or(0); + let history_depth = Self::history_depth(); + let last_payout_era = current_era.saturating_sub(history_depth); + // Convert all ledgers to the new format. 
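The ledger conversion performed by this migration reduces to building a claim history from the old `last_reward` field; a standalone sketch with plain integers (the era values in the example are illustrative):

```rust
/// Build the new `claimed_rewards` vector from the old `last_reward` field:
/// every era from the start of the history window up to and including
/// `last_reward` is treated as already claimed.
fn migrate_claimed_rewards(last_reward: Option<u32>, current_era: u32, history_depth: u32) -> Vec<u32> {
    let last_payout_era = current_era.saturating_sub(history_depth);
    let last_reward = last_reward.unwrap_or(0);
    (last_payout_era..=last_reward).collect()
}

fn main() {
    // A ledger that last claimed era 90, with an illustrative current era of 100
    // and history depth of 84, is migrated to "eras 16..=90 already claimed".
    assert_eq!(migrate_claimed_rewards(Some(90), 100, 84), (16u32..=90).collect::<Vec<u32>>());
    // A ledger that never claimed anything ends up with an empty claim history.
    assert_eq!(migrate_claimed_rewards(None, 100, 84), Vec::<u32>::new());
}
```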
+ for (hash, old_ledger) in StorageIterator::>>::new(b"Staking", b"Ledger").drain() { + let last_reward = old_ledger.last_reward.unwrap_or(0); + let new_ledger = StakingLedger { + stash: old_ledger.stash, + total: old_ledger.total, + active: old_ledger.active, + unlocking: old_ledger.unlocking, + claimed_rewards: (last_payout_era..=last_reward).collect(), + }; + put_storage_value(b"Staking", b"Ledger", &hash, new_ledger); + } + MigrateEra::put(current_era); + } /// The total balance that can be slashed from a stash account as of right now. pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { Self::bonded(stash).and_then(Self::ledger).map(|l| l.active).unwrap_or_default() } - /// Check that list is sorted and has no duplicates. - fn is_sorted_and_unique(list: &Vec) -> bool { - list.windows(2).all(|w| w[0] < w[1]) + /// internal impl of [`slashable_balance_of`] that returns [`ExtendedBalance`]. + fn slashable_balance_of_extended(stash: &T::AccountId) -> ExtendedBalance { + , u64>>::convert( + Self::slashable_balance_of(stash) + ) as ExtendedBalance + } + + /// Dump the list of validators and nominators into vectors and keep them on-chain. + /// + /// This data is used to efficiently evaluate election results. returns `true` if the operation + /// is successful. + fn create_stakers_snapshot() -> bool { + let validators = >::iter().map(|(v, _)| v).collect::>(); + let mut nominators = >::iter().map(|(n, _)| n).collect::>(); + + let num_validators = validators.len(); + let num_nominators = nominators.len(); + if + num_validators > MAX_VALIDATORS || + num_nominators.saturating_add(num_validators) > MAX_NOMINATORS + { + log!( + warn, + "💸 Snapshot size too big [{} <> {}][{} <> {}].", + num_validators, + MAX_VALIDATORS, + num_nominators, + MAX_NOMINATORS, + ); + false + } else { + // all validators nominate themselves; + nominators.extend(validators.clone()); + + >::put(validators); + >::put(nominators); + true + } } - // MUTABLES (DANGEROUS) + /// Clears both snapshots of stakers. + fn kill_stakers_snapshot() { + >::kill(); + >::kill(); + } - fn do_payout_nominator(who: T::AccountId, era: EraIndex, validators: Vec<(T::AccountId, u32)>) + fn do_payout_nominator(ctrl: T::AccountId, era: EraIndex, validators: Vec<(T::AccountId, u32)>) -> DispatchResult { // validators len must not exceed `MAX_NOMINATIONS` to avoid querying more validator @@ -1503,20 +2007,35 @@ impl Module { if validators.len() > MAX_NOMINATIONS { return Err(Error::::InvalidNumberOfNominations.into()); } + // If migrate_era is not populated, then you should use `payout_stakers` + let migrate_era = MigrateEra::get().ok_or(Error::::InvalidEraToReward)?; + // This payout mechanism will only work for eras before the migration. + // Subsequent payouts should use `payout_stakers`. + ensure!(era < migrate_era, Error::::InvalidEraToReward); + let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; + ensure!(era <= current_era, Error::::InvalidEraToReward); + let history_depth = Self::history_depth(); + ensure!(era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward); // Note: if era has no reward to be claimed, era may be future. better not to update // `nominator_ledger.last_reward` in this case. 
let era_payout = >::get(&era) .ok_or_else(|| Error::::InvalidEraToReward)?; - let mut nominator_ledger = >::get(&who).ok_or_else(|| Error::::NotController)?; + let mut nominator_ledger = >::get(&ctrl).ok_or_else(|| Error::::NotController)?; - if nominator_ledger.last_reward.map(|last_reward| last_reward >= era).unwrap_or(false) { - return Err(Error::::InvalidEraToReward.into()); + ensure!( + Self::era_election_status().is_closed() || Self::payee(&nominator_ledger.stash) != RewardDestination::Staked, + Error::::CallNotAllowed, + ); + + nominator_ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); + match nominator_ledger.claimed_rewards.binary_search(&era) { + Ok(_) => Err(Error::::AlreadyClaimed)?, + Err(pos) => nominator_ledger.claimed_rewards.insert(pos, era), } - nominator_ledger.last_reward = Some(era); - >::insert(&who, &nominator_ledger); + >::insert(&ctrl, &nominator_ledger); let mut reward = Perbill::zero(); let era_reward_points = >::get(&era); @@ -1552,25 +2071,42 @@ impl Module { } if let Some(imbalance) = Self::make_payout(&nominator_ledger.stash, reward * era_payout) { - Self::deposit_event(RawEvent::Reward(who, imbalance.peek())); + Self::deposit_event(RawEvent::Reward(ctrl, imbalance.peek())); } Ok(()) } - fn do_payout_validator(who: T::AccountId, era: EraIndex) -> DispatchResult { + fn do_payout_validator(ctrl: T::AccountId, era: EraIndex) -> DispatchResult { + // If migrate_era is not populated, then you should use `payout_stakers` + let migrate_era = MigrateEra::get().ok_or(Error::::InvalidEraToReward)?; + // This payout mechanism will only work for eras before the migration. + // Subsequent payouts should use `payout_stakers`. + ensure!(era < migrate_era, Error::::InvalidEraToReward); + let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; + ensure!(era <= current_era, Error::::InvalidEraToReward); + let history_depth = Self::history_depth(); + ensure!(era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward); + // Note: if era has no reward to be claimed, era may be future. better not to update // `ledger.last_reward` in this case. 
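The claim bookkeeping used by these payout paths keeps a sorted `Vec` of claimed eras per ledger; a standalone sketch of that pattern, with plain integers and a string error standing in for the pallet's `Error` variants:

```rust
/// Record `era` as claimed, pruning entries that have fallen out of the history
/// window and rejecting duplicates, mirroring the `claimed_rewards` handling above.
fn claim_era(
    claimed: &mut Vec<u32>,
    era: u32,
    current_era: u32,
    history_depth: u32,
) -> Result<(), &'static str> {
    // Drop eras that can no longer be paid out anyway.
    claimed.retain(|&x| x >= current_era.saturating_sub(history_depth));
    // `claimed` is kept sorted, so binary search both detects duplicates and
    // yields the insertion point that preserves the ordering.
    match claimed.binary_search(&era) {
        Ok(_) => Err("AlreadyClaimed"),
        Err(pos) => {
            claimed.insert(pos, era);
            Ok(())
        }
    }
}

fn main() {
    let mut claimed = vec![90, 92];
    assert_eq!(claim_era(&mut claimed, 91, 95, 84), Ok(()));
    assert_eq!(claimed, vec![90, 91, 92]);
    // Claiming the same era twice is rejected.
    assert_eq!(claim_era(&mut claimed, 91, 95, 84), Err("AlreadyClaimed"));
}
```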
let era_payout = >::get(&era) .ok_or_else(|| Error::::InvalidEraToReward)?; - let mut ledger = >::get(&who).ok_or_else(|| Error::::NotController)?; - if ledger.last_reward.map(|last_reward| last_reward >= era).unwrap_or(false) { - return Err(Error::::InvalidEraToReward.into()); + let mut ledger = >::get(&ctrl).ok_or_else(|| Error::::NotController)?; + + ensure!( + Self::era_election_status().is_closed() || Self::payee(&ledger.stash) != RewardDestination::Staked, + Error::::CallNotAllowed, + ); + + ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); + match ledger.claimed_rewards.binary_search(&era) { + Ok(_) => Err(Error::::AlreadyClaimed)?, + Err(pos) => ledger.claimed_rewards.insert(pos, era), } - ledger.last_reward = Some(era); - >::insert(&who, &ledger); + >::insert(&ctrl, &ledger); let era_reward_points = >::get(&era); let commission = Self::eras_validator_prefs(&era, &ledger.stash).commission; @@ -1594,7 +2130,109 @@ impl Module { ); if let Some(imbalance) = Self::make_payout(&ledger.stash, reward * era_payout) { - Self::deposit_event(RawEvent::Reward(who, imbalance.peek())); + Self::deposit_event(RawEvent::Reward(ctrl, imbalance.peek())); + } + + Ok(()) + } + + fn do_payout_stakers( + validator_stash: T::AccountId, + era: EraIndex, + ) -> DispatchResult { + // Validate input data + let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; + ensure!(era <= current_era, Error::::InvalidEraToReward); + let history_depth = Self::history_depth(); + ensure!(era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward); + + // If there was no migration, then this function is always valid. + if let Some(migrate_era) = MigrateEra::get() { + // This payout mechanism will only work for eras on and after the migration. + // Payouts before then should use `payout_nominator`/`payout_validator`. + ensure!(migrate_era <= era, Error::::InvalidEraToReward); + } + + // Note: if era has no reward to be claimed, era may be future. better not to update + // `ledger.claimed_rewards` in this case. + let era_payout = >::get(&era) + .ok_or_else(|| Error::::InvalidEraToReward)?; + + let controller = Self::bonded(&validator_stash).ok_or(Error::::NotStash)?; + let mut ledger = >::get(&controller).ok_or_else(|| Error::::NotController)?; + + ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); + match ledger.claimed_rewards.binary_search(&era) { + Ok(_) => Err(Error::::AlreadyClaimed)?, + Err(pos) => ledger.claimed_rewards.insert(pos, era), + } + + let exposure = >::get(&era, &ledger.stash); + + /* Input data seems good, no errors allowed after this point */ + + >::insert(&controller, &ledger); + + // Get Era reward points. It has TOTAL and INDIVIDUAL + // Find the fraction of the era reward that belongs to the validator + // Take that fraction of the eras rewards to split to nominator and validator + // + // Then look at the validator, figure out the proportion of their reward + // which goes to them and each of their nominators. + + let era_reward_points = >::get(&era); + let total_reward_points = era_reward_points.total; + let validator_reward_points = era_reward_points.individual.get(&ledger.stash) + .map(|points| *points) + .unwrap_or_else(|| Zero::zero()); + + // Nothing to do if they have no reward points. + if validator_reward_points.is_zero() { return Ok(())} + + // This is the fraction of the total reward that the validator and the + // nominators will get. 
+ let validator_total_reward_part = Perbill::from_rational_approximation( + validator_reward_points, + total_reward_points, + ); + + // This is how much validator + nominators are entitled to. + let validator_total_payout = validator_total_reward_part * era_payout; + + let validator_prefs = Self::eras_validator_prefs(&era, &validator_stash); + // Validator first gets a cut off the top. + let validator_commission = validator_prefs.commission; + let validator_commission_payout = validator_commission * validator_total_payout; + + let validator_leftover_payout = validator_total_payout - validator_commission_payout; + // Now let's calculate how this is split to the validator. + let validator_exposure_part = Perbill::from_rational_approximation( + exposure.own, + exposure.total, + ); + let validator_staking_payout = validator_exposure_part * validator_leftover_payout; + + // We can now make total validator payout: + if let Some(imbalance) = Self::make_payout( + &ledger.stash, + validator_staking_payout + validator_commission_payout + ) { + Self::deposit_event(RawEvent::Reward(ledger.stash, imbalance.peek())); + } + + // Lets now calculate how this is split to the nominators. + // Sort nominators by highest to lowest exposure, but only keep `max_nominator_payouts` of them. + for nominator in exposure.others.iter() { + let nominator_exposure_part = Perbill::from_rational_approximation( + nominator.value, + exposure.total, + ); + + let nominator_reward: BalanceOf = nominator_exposure_part * validator_leftover_payout; + // We can now make nominator payout: + if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { + Self::deposit_event(RawEvent::Reward(nominator.who.clone(), imbalance.peek())); + } } Ok(()) @@ -1662,9 +2300,17 @@ impl Module { Forcing::ForceNew => ForceEra::kill(), Forcing::ForceAlways => (), Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), - _ => return None, + _ => { + // not forcing, not a new era either. If final, set the flag. + if era_length + 1 >= T::SessionsPerEra::get() { + IsCurrentSessionFinal::put(true); + } + return None + }, } + // new era. + IsCurrentSessionFinal::put(false); Self::new_era(session_index) } else { // Set initial era @@ -1672,6 +2318,181 @@ impl Module { } } + /// Basic and cheap checks that we perform in validate unsigned, and in the execution. + pub fn pre_dispatch_checks(score: PhragmenScore, era: EraIndex) -> Result<(), Error> { + // discard solutions that are not in-time + // check window open + ensure!( + Self::era_election_status().is_open(), + Error::::PhragmenEarlySubmission, + ); + + // check current era. + if let Some(current_era) = Self::active_era().map(|e| e.index) { + ensure!( + current_era == era, + Error::::PhragmenEarlySubmission, + ) + } + + // assume the given score is valid. Is it better than what we have on-chain, if we have any? + if let Some(queued_score) = Self::queued_score() { + ensure!( + is_score_better(queued_score, score), + Error::::PhragmenWeakSubmission, + ) + } + + Ok(()) + } + + /// Checks a given solution and if correct and improved, writes it on chain as the queued result + /// of the next round. This may be called by both a signed and an unsigned transaction. + pub fn check_and_replace_solution( + winners: Vec, + compact_assignments: CompactAssignments, + compute: ElectionCompute, + claimed_score: PhragmenScore, + era: EraIndex, + ) -> Result<(), Error> { + // Do the basic checks. era, claimed score and window open. 
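The split performed in `do_payout_stakers` can be followed end to end with a small numeric sketch; all values are illustrative and `Perbill` arithmetic is approximated with integer ratios to keep the example dependency-free:

```rust
/// Split an era payout between one validator and its nominators, following the
/// same steps as `do_payout_stakers`: reward-point share, commission off the top,
/// then the remainder pro rata to exposure.
fn split_payout(
    era_payout: u128,
    validator_points: u128,
    total_points: u128,
    commission_parts_per_million: u128, // stands in for `ValidatorPrefs::commission`
    own_exposure: u128,
    nominator_exposures: &[(&'static str, u128)],
) -> (u128, Vec<(&'static str, u128)>) {
    let total_exposure: u128 =
        own_exposure + nominator_exposures.iter().map(|(_, v)| v).sum::<u128>();

    // Share of the era payout earned by this validator's reward points.
    let validator_total_payout = era_payout * validator_points / total_points;
    // Commission is taken off the top ...
    let commission = validator_total_payout * commission_parts_per_million / 1_000_000;
    // ... and the rest is split by stake.
    let leftover = validator_total_payout - commission;

    let validator_payout = commission + leftover * own_exposure / total_exposure;
    let nominator_payouts = nominator_exposures
        .iter()
        .map(|(who, value)| (*who, leftover * value / total_exposure))
        .collect();
    (validator_payout, nominator_payouts)
}

fn main() {
    // Era payout 1_000_000, validator earned 20 of 100 reward points, 10% commission,
    // own stake of 1_000 against 3_000 of nominator stake.
    let (validator, nominators) =
        split_payout(1_000_000, 20, 100, 100_000, 1_000, &[("nom-a", 1_000), ("nom-b", 2_000)]);
    // The validator receives 20_000 commission plus 45_000 for its own stake.
    assert_eq!(validator, 65_000);
    assert_eq!(nominators, vec![("nom-a", 45_000), ("nom-b", 90_000)]);
}
```

The commission and the own-stake share end up in a single payment to the validator's stash, matching the single `make_payout` call for the validator in the code above.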
+ Self::pre_dispatch_checks(claimed_score, era)?; + + // Check that the number of presented winners is sane. Most often we have more candidates + // that we need. Then it should be Self::validator_count(). Else it should be all the + // candidates. + let snapshot_length = >::decode_len() + .map_err(|_| Error::::SnapshotUnavailable)?; + let desired_winners = Self::validator_count().min(snapshot_length as u32); + ensure!(winners.len() as u32 == desired_winners, Error::::PhragmenBogusWinnerCount); + + // decode snapshot validators. + let snapshot_validators = Self::snapshot_validators() + .ok_or(Error::::SnapshotUnavailable)?; + + // check if all winners were legit; this is rather cheap. Replace with accountId. + let winners = winners.into_iter().map(|widx| { + // NOTE: at the moment, since staking is explicitly blocking any offence until election + // is closed, we don't check here if the account id at `snapshot_validators[widx]` is + // actually a validator. If this ever changes, this loop needs to also check this. + snapshot_validators.get(widx as usize).cloned().ok_or(Error::::PhragmenBogusWinner) + }).collect::, Error>>()?; + + // decode the rest of the snapshot. + let snapshot_nominators = >::snapshot_nominators() + .ok_or(Error::::SnapshotUnavailable)?; + + // helpers + let nominator_at = |i: NominatorIndex| -> Option { + snapshot_nominators.get(i as usize).cloned() + }; + let validator_at = |i: ValidatorIndex| -> Option { + snapshot_validators.get(i as usize).cloned() + }; + + // un-compact. + let assignments = compact_assignments.into_assignment( + nominator_at, + validator_at, + ).map_err(|e| { + // log the error since it is not propagated into the runtime error. + log!(warn, "💸 un-compacting solution failed due to {:?}", e); + Error::::PhragmenBogusCompact + })?; + + // check all nominators actually including the claimed vote. Also check correct self votes. + // Note that we assume all validators and nominators in `assignments` are properly bonded, + // because they are coming from the snapshot via a given index. + for Assignment { who, distribution } in assignments.iter() { + let is_validator = >::contains_key(&who); + let maybe_nomination = Self::nominators(&who); + + if !(maybe_nomination.is_some() ^ is_validator) { + // all of the indices must map to either a validator or a nominator. If this is ever + // not the case, then the locking system of staking is most likely faulty, or we + // have bigger problems. + log!(error, "💸 detected an error in the staking locking and snapshot."); + // abort. + return Err(Error::::PhragmenBogusNominator); + } + + if !is_validator { + // a normal vote + let nomination = maybe_nomination.expect( + "exactly one of `maybe_validator` and `maybe_nomination.is_some` is true. \ + is_validator is false; maybe_nomination is some; qed" + ); + + // NOTE: we don't really have to check here if the sum of all edges are the + // nominator correct. Un-compacting assures this by definition. + + for (t, _) in distribution { + // each target in the provided distribution must be actually nominated by the + // nominator after the last non-zero slash. 
+ if nomination.targets.iter().find(|&tt| tt == t).is_none() { + return Err(Error::::PhragmenBogusNomination); + } + + if ::SlashingSpans::get(&t).map_or( + false, + |spans| nomination.submitted_in < spans.last_nonzero_slash(), + ) { + return Err(Error::::PhragmenSlashedNomination); + } + } + } else { + // a self vote + ensure!(distribution.len() == 1, Error::::PhragmenBogusSelfVote); + ensure!(distribution[0].0 == *who, Error::::PhragmenBogusSelfVote); + // defensive only. A compact assignment of length one does NOT encode the weight and + // it is always created to be 100%. + ensure!( + distribution[0].1 == OffchainAccuracy::one(), + Error::::PhragmenBogusSelfVote, + ); + } + } + + // convert into staked assignments. + let staked_assignments = sp_phragmen::assignment_ratio_to_staked( + assignments, + Self::slashable_balance_of_extended, + ); + + // build the support map thereof in order to evaluate. + // OPTIMIZATION: loop to create the staked assignments but it would bloat the code. Okay for + // now as it does not add to the complexity order. + let (supports, num_error) = build_support_map::( + &winners, + &staked_assignments, + ); + // This technically checks that all targets in all nominators were among the winners. + ensure!(num_error == 0, Error::::PhragmenBogusEdge); + + // Check if the score is the same as the claimed one. + let submitted_score = evaluate_support(&supports); + ensure!(submitted_score == claimed_score, Error::::PhragmenBogusScore); + + // At last, alles Ok. Exposures and store the result. + let exposures = Self::collect_exposure(supports); + log!( + info, + "💸 A better solution (with compute {:?}) has been validated and stored on chain.", + compute, + ); + + // write new results. + >::put(ElectionResult { + elected_stashes: winners, + compute, + exposures, + }); + QueuedScore::put(submitted_score); + + Ok(()) + + } + /// Start a session potentially starting an era. fn start_session(start_session: SessionIndex) { let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); @@ -1706,7 +2527,7 @@ impl Module { /// * reset `active_era.start`, /// * update `BondedEras` and apply slashes. fn start_era(start_session: SessionIndex) { - let active_era = >::mutate(|active_era| { + let active_era = ActiveEra::mutate(|active_era| { let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0); *active_era = Some(ActiveEraInfo { index: new_index, @@ -1744,12 +2565,12 @@ impl Module { } /// Compute payout for era. - fn end_era(active_era: ActiveEraInfo>, _session_index: SessionIndex) { + fn end_era(active_era: ActiveEraInfo, _session_index: SessionIndex) { // Note: active_era_start can be None if end era is called during genesis config. if let Some(active_era_start) = active_era.start { - let now = T::Time::now(); + let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); - let era_duration = now - active_era_start; + let era_duration = now_as_millis_u64 - active_era_start; let (total_payout, _max_payout) = inflation::compute_total_payout( &T::RewardCurve::get(), Self::eras_total_stake(&active_era.index), @@ -1778,63 +2599,168 @@ impl Module { } // Set staking information for new era. - let maybe_new_validators = Self::select_validators(current_era); + let maybe_new_validators = Self::select_and_update_validators(current_era); maybe_new_validators } - /// Clear all era information for given era. 
- fn clear_era_information(era_index: EraIndex) { - >::remove_prefix(era_index); - >::remove_prefix(era_index); - >::remove_prefix(era_index); - >::remove(era_index); - >::remove(era_index); - >::remove(era_index); - ErasStartSessionIndex::remove(era_index); - } + /// Select the new validator set at the end of the era. + /// + /// Runs [`try_do_phragmen`] and updates the following storage items: + /// - [`EraElectionStatus`]: with `None`. + /// - [`ErasStakers`]: with the new staker set. + /// - [`ErasStakersClipped`]. + /// - [`ErasValidatorPrefs`]. + /// - [`ErasTotalStake`]: with the new total stake. + /// - [`SnapshotValidators`] and [`SnapshotNominators`] are both removed. + /// + /// Internally, [`QueuedElected`], snapshots and [`QueuedScore`] are also consumed. + /// + /// If the election has been successful, It passes the new set upwards. + /// + /// This should only be called at the end of an era. + fn select_and_update_validators(current_era: EraIndex) -> Option> { + if let Some(ElectionResult::> { + elected_stashes, + exposures, + compute, + }) = Self::try_do_phragmen() { + // We have chosen the new validator set. Submission is no longer allowed. + >::put(ElectionStatus::Closed); + + // kill the snapshots. + Self::kill_stakers_snapshot(); + + // Populate Stakers and write slot stake. + let mut total_stake: BalanceOf = Zero::zero(); + exposures.into_iter().for_each(|(stash, exposure)| { + total_stake = total_stake.saturating_add(exposure.total); + >::insert(current_era, &stash, &exposure); - /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. - fn apply_unapplied_slashes(active_era: EraIndex) { - let slash_defer_duration = T::SlashDeferDuration::get(); - ::EarliestUnappliedSlash::mutate(|earliest| if let Some(ref mut earliest) = earliest { - let keep_from = active_era.saturating_sub(slash_defer_duration); - for era in (*earliest)..keep_from { - let era_slashes = ::UnappliedSlashes::take(&era); - for slash in era_slashes { - slashing::apply_slash::(slash); + let mut exposure_clipped = exposure; + let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; + if exposure_clipped.others.len() > clipped_max_len { + exposure_clipped.others.sort_unstable_by(|a, b| a.value.cmp(&b.value).reverse()); + exposure_clipped.others.truncate(clipped_max_len); } + >::insert(¤t_era, &stash, exposure_clipped); + }); + + // Insert current era staking information + >::insert(¤t_era, total_stake); + + // collect the pref of all winners + for stash in &elected_stashes { + let pref = Self::validators(stash); + >::insert(¤t_era, stash, pref); } - *earliest = (*earliest).max(keep_from) - }) + // emit event + Self::deposit_event(RawEvent::StakingElection(compute)); + + log!( + info, + "💸 new validator set of size {:?} has been elected via {:?} for era {:?}", + elected_stashes.len(), + compute, + current_era, + ); + + Some(elected_stashes) + } else { + None + } + } + + /// Select a new validator set from the assembled stakers and their role preferences. It tries + /// first to peek into [`QueuedElected`]. Otherwise, it runs a new phragmen. + /// + /// If [`QueuedElected`] and [`QueuedScore`] exists, they are both removed. No further storage + /// is updated. + fn try_do_phragmen() -> Option>> { + // a phragmen result from either a stored submission or locally executed one. + let next_result = >::take().or_else(|| + Self::do_phragmen_with_post_processing::(ElectionCompute::OnChain) + ); + + // either way, kill this. 
We remove it here to make sure it always has the exact same + // lifetime as `QueuedElected`. + QueuedScore::kill(); + + next_result } - /// Select a new validator set from the assembled stakers and their role preferences, and store - /// staking information for the new current era. + /// Execute phragmen and return the new results. The edge weights are processed into support + /// values. + /// + /// This is basically a wrapper around [`do_phragmen`] which translates `PhragmenResult` into + /// `ElectionResult`. /// - /// Fill the storages `ErasStakers`, `ErasStakersClipped`, `ErasValidatorPrefs` and - /// `ErasTotalStake` for current era. + /// No storage item is updated. + fn do_phragmen_with_post_processing(compute: ElectionCompute) + -> Option>> + where + Accuracy: sp_std::ops::Mul, + ExtendedBalance: From<::Inner>, + { + if let Some(phragmen_result) = Self::do_phragmen::() { + let elected_stashes = phragmen_result.winners.iter() + .map(|(s, _)| s.clone()) + .collect::>(); + let assignments = phragmen_result.assignments; + + let staked_assignments = sp_phragmen::assignment_ratio_to_staked( + assignments, + Self::slashable_balance_of_extended, + ); + + let (supports, _) = build_support_map::( + &elected_stashes, + &staked_assignments, + ); + + // collect exposures + let exposures = Self::collect_exposure(supports); + + // In order to keep the property required by `on_session_ending` that we must return the + // new validator set even if it's the same as the old, as long as any underlying + // economic conditions have changed, we don't attempt to do any optimization where we + // compare against the prior set. + Some(ElectionResult::> { + elected_stashes, + exposures, + compute, + }) + } else { + // There were not enough candidates for even our minimal level of functionality. This is + // bad. We should probably disable all functionality except for block production and let + // the chain keep producing blocks until we can decide on a sufficiently substantial + // set. TODO: #2494 + None + } + } + + /// Execute phragmen and return the new results. No post-processing is applied and the raw edge + /// weights are returned. /// - /// Returns a set of newly selected _stash_ IDs. + /// Self votes are added and nominations before the most recent slashing span are reaped. /// - /// Assumes storage is coherent with the declaration. - fn select_validators(current_era: EraIndex) -> Option> { - let mut all_nominators: Vec<(T::AccountId, Vec)> = Vec::new(); - let mut all_validators_and_prefs = BTreeMap::new(); + /// No storage item is updated. + fn do_phragmen() -> Option> { + let mut all_nominators: Vec<(T::AccountId, BalanceOf, Vec)> = Vec::new(); let mut all_validators = Vec::new(); - for (validator, preference) in >::enumerate() { - let self_vote = (validator.clone(), vec![validator.clone()]); + for (validator, _) in >::iter() { + // append self vote + let self_vote = (validator.clone(), Self::slashable_balance_of(&validator), vec![validator.clone()]); all_nominators.push(self_vote); - all_validators_and_prefs.insert(validator.clone(), preference); all_validators.push(validator); } - let nominator_votes = >::enumerate().map(|(nominator, nominations)| { + let nominator_votes = >::iter().map(|(nominator, nominations)| { let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; // Filter out nomination targets which were nominated before the most recent - // non-zero slash. + // slashing span. 
targets.retain(|stash| { ::SlashingSpans::get(&stash).map_or( true, @@ -1844,95 +2770,49 @@ impl Module { (nominator, targets) }); - all_nominators.extend(nominator_votes); + all_nominators.extend(nominator_votes.map(|(n, ns)| { + let s = Self::slashable_balance_of(&n); + (n, s, ns) + })); - let maybe_phragmen_result = sp_phragmen::elect::<_, _, _, T::CurrencyToVote, Perbill>( + elect::<_, _, T::CurrencyToVote, Accuracy>( Self::validator_count() as usize, Self::minimum_validator_count().max(1) as usize, all_validators, all_nominators, - Self::slashable_balance_of, - ); - - if let Some(phragmen_result) = maybe_phragmen_result { - let elected_stashes = phragmen_result.winners.into_iter() - .map(|(s, _)| s) - .collect::>(); - let assignments = phragmen_result.assignments; - - let to_balance = |e: ExtendedBalance| - >>::convert(e); - - let supports = sp_phragmen::build_support_map::<_, _, _, T::CurrencyToVote, Perbill>( - &elected_stashes, - &assignments, - Self::slashable_balance_of, - ); - - // Populate stakers information and figure out the total stake. - let mut total_staked = BalanceOf::::zero(); - for (c, s) in supports.into_iter() { - // build `struct exposure` from `support` - let mut others = Vec::new(); - let mut own: BalanceOf = Zero::zero(); - let mut total: BalanceOf = Zero::zero(); - s.voters - .into_iter() - .map(|(who, value)| (who, to_balance(value))) - .for_each(|(who, value)| { - if who == c { - own = own.saturating_add(value); - } else { - others.push(IndividualExposure { who, value }); - } - total = total.saturating_add(value); - }); - - total_staked = total_staked.saturating_add(total); - - let exposure = Exposure { - own, - others, - // This might reasonably saturate and we cannot do much about it. The sum of - // someone's stake might exceed the balance type if they have the maximum amount - // of balance and receive some support. This is super unlikely to happen, yet - // we simulate it in some tests. - total, - }; - >::insert(¤t_era, &c, &exposure); + ) + } - let mut exposure_clipped = exposure; - let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; - if exposure_clipped.others.len() > clipped_max_len { - exposure_clipped.others.sort_unstable_by(|a, b| a.value.cmp(&b.value).reverse()); - exposure_clipped.others.truncate(clipped_max_len); - } - >::insert(¤t_era, &c, exposure_clipped); - } + /// Consume a set of [`Supports`] from [`sp_phragmen`] and collect them into a [`Exposure`] + fn collect_exposure(supports: SupportMap) -> Vec<(T::AccountId, Exposure>)> { + let to_balance = |e: ExtendedBalance| + >>::convert(e); + + supports.into_iter().map(|(validator, support)| { + // build `struct exposure` from `support` + let mut others = Vec::new(); + let mut own: BalanceOf = Zero::zero(); + let mut total: BalanceOf = Zero::zero(); + support.voters + .into_iter() + .map(|(nominator, weight)| (nominator, to_balance(weight))) + .for_each(|(nominator, stake)| { + if nominator == validator { + own = own.saturating_add(stake); + } else { + others.push(IndividualExposure { who: nominator, value: stake }); + } + total = total.saturating_add(stake); + }); - // Insert current era staking informations - >::insert(¤t_era, total_staked); - let default_pref = ValidatorPrefs::default(); - for stash in &elected_stashes { - let pref = all_validators_and_prefs.get(stash) - .unwrap_or(&default_pref); // Must never happen, but better to be safe. 
- >::insert(¤t_era, stash, pref); - } + let exposure = Exposure { + own, + others, + total, + }; - // In order to keep the property required by `n_session_ending` - // that we must return the new validator set even if it's the same as the old, - // as long as any underlying economic conditions have changed, we don't attempt - // to do any optimization where we compare against the prior set. - Some(elected_stashes) - } else { - // There were not enough candidates for even our minimal level of functionality. - // This is bad. - // We should probably disable all functionality except for block production - // and let the chain keep producing blocks until we can decide on a sufficiently - // substantial set. - // TODO: #2494 - None - } + (validator, exposure) + }).collect::)>>() } /// Remove all associated data of a stash account from the staking system. @@ -1957,6 +2837,33 @@ impl Module { Ok(()) } + /// Clear all era information for given era. + fn clear_era_information(era_index: EraIndex) { + >::remove_prefix(era_index); + >::remove_prefix(era_index); + >::remove_prefix(era_index); + >::remove(era_index); + >::remove(era_index); + >::remove(era_index); + ErasStartSessionIndex::remove(era_index); + } + + /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. + fn apply_unapplied_slashes(active_era: EraIndex) { + let slash_defer_duration = T::SlashDeferDuration::get(); + ::EarliestUnappliedSlash::mutate(|earliest| if let Some(ref mut earliest) = earliest { + let keep_from = active_era.saturating_sub(slash_defer_duration); + for era in (*earliest)..keep_from { + let era_slashes = ::UnappliedSlashes::take(&era); + for slash in era_slashes { + slashing::apply_slash::(slash); + } + } + + *earliest = (*earliest).max(keep_from) + }) + } + /// Add reward points to validators using their stash account ID. /// /// Validators are keyed by stash account ID and must be in the current elected set. @@ -1989,6 +2896,23 @@ impl Module { _ => ForceEra::put(Forcing::ForceNew), } } + + fn will_era_be_forced() -> bool { + match ForceEra::get() { + Forcing::ForceAlways | Forcing::ForceNew => true, + Forcing::ForceNone | Forcing::NotForcing => false, + } + } + + #[cfg(feature = "runtime-benchmarks")] + pub fn add_era_stakers(current_era: EraIndex, controller: T::AccountId, exposure: Exposure>) { + >::insert(¤t_era, &controller, &exposure); + } + + #[cfg(feature = "runtime-benchmarks")] + pub fn put_election_status(status: ElectionStatus::) { + >::put(status); + } } /// In this implementation `new_session(session)` must be called before `end_session(session-1)` @@ -2008,9 +2932,7 @@ impl pallet_session::SessionManager for Module { } } -/// This implementation has the same constrains as the implementation of -/// `pallet_session::SessionManager`. -impl SessionManager>> for Module { +impl historical::SessionManager>> for Module { fn new_session(new_index: SessionIndex) -> Option>)>> { @@ -2096,15 +3018,20 @@ impl OnOffenceHandler>], slash_fraction: &[Perbill], slash_session: SessionIndex, - ) { + ) -> Result<(), ()> { + if !Self::can_report() { + return Err(()) + } + let reward_proportion = SlashRewardFraction::get(); let active_era = { let active_era = Self::active_era(); if active_era.is_none() { - return + // this offence need not be re-submitted. 
+ return Ok(()) } - active_era.unwrap().index + active_era.expect("value checked not to be `None`; qed").index }; let active_era_start_session_index = Self::eras_start_session_index(active_era) .unwrap_or_else(|| { @@ -2123,7 +3050,7 @@ impl OnOffenceHandler return, // before bonding period. defensive - should be filtered out. + None => return Ok(()), // before bonding period. defensive - should be filtered out. Some(&(ref slash_era, _)) => *slash_era, } }; @@ -2137,8 +3064,7 @@ impl OnOffenceHandler OnOffenceHandler bool { + Self::era_election_status().is_closed() } } @@ -2198,3 +3130,75 @@ impl ReportOffence } } } + +impl From> for InvalidTransaction { + fn from(e: Error) -> Self { + InvalidTransaction::Custom(e.as_u8()) + } +} + +#[allow(deprecated)] +impl frame_support::unsigned::ValidateUnsigned for Module { + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::submit_election_solution_unsigned( + _, + _, + score, + era, + ) = call { + use offchain_election::DEFAULT_LONGEVITY; + + // discard solution not coming from the local OCW. + match source { + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } + _ => { + log!(debug, "rejecting unsigned transaction because it is not local/in-block."); + return InvalidTransaction::Call.into(); + } + } + + if let Err(e) = Self::pre_dispatch_checks(*score, *era) { + log!(debug, "validate unsigned pre dispatch checks failed due to {:?}.", e); + return InvalidTransaction::from(e).into(); + } + + log!(debug, "validateUnsigned succeeded for a solution at era {}.", era); + + ValidTransaction::with_tag_prefix("StakingOffchain") + // The higher the score[0], the better a solution is. + .priority(T::UnsignedPriority::get().saturating_add(score[0].saturated_into())) + // Defensive only. A single solution can exist in the pool per era. Each validator + // will run OCW at most once per era, hence there should never exist more than one + // transaction anyhow. + .and_provides(era) + // Note: this can be more accurate in the future. We do something like + // `era_end_block - current_block` but that is not needed now as we eagerly run + // offchain workers now and the above should be same as `T::ElectionLookahead` + // without the need to query more storage in the validation phase. If we randomize + // offchain worker, then we might re-consider this. + .longevity(TryInto::::try_into( + T::ElectionLookahead::get()).unwrap_or(DEFAULT_LONGEVITY) + ) + // We don't propagate this. This can never the validated at a remote node. + .propagate(false) + .build() + } else { + InvalidTransaction::Call.into() + } + } + + fn pre_dispatch(_: &Self::Call) -> Result<(), TransactionValidityError> { + // IMPORTANT NOTE: By default, a sane `pre-dispatch` should always do the same checks as + // `validate_unsigned` and overriding this should be done with care. this module has only + // one unsigned entry point, in which we call into `>::pre_dispatch_checks()` + // which is all the important checks that we do in `validate_unsigned`. Hence, we can safely + // override this to save some time. + Ok(()) + } +} + +/// Check that list is sorted and has no duplicates. 
+fn is_sorted_and_unique(list: &[u32]) -> bool { + list.windows(2).all(|w| w[0] < w[1]) +} diff --git a/frame/staking/src/migration/deprecated.rs b/frame/staking/src/migration/deprecated.rs deleted file mode 100644 index b5ec26d32f62ea45e49eb5778785ffefa63282e8..0000000000000000000000000000000000000000 --- a/frame/staking/src/migration/deprecated.rs +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -/// Deprecated storages used for migration from v1.0.0 to v2.0.0 only. - -use crate::{Trait, BalanceOf, MomentOf, SessionIndex, Exposure, UnlockChunk}; -use codec::{Encode, Decode, HasCompact}; -use frame_support::{decl_module, decl_storage}; -use sp_std::prelude::*; - -/// Reward points of an era. Used to split era total payout between validators. -#[derive(Encode, Decode, Default)] -pub struct EraPoints { - /// Total number of points. Equals the sum of reward points for each validator. - pub total: u32, - /// The reward points earned by a given validator. The index of this vec corresponds to the - /// index into the current validator set. - pub individual: Vec, -} - -#[derive(Encode, Decode)] -pub struct OldStakingLedger { - pub stash: AccountId, - #[codec(compact)] - pub total: Balance, - #[codec(compact)] - pub active: Balance, - pub unlocking: Vec>, -} - -decl_module! { - pub struct Module for enum Call where origin: T::Origin { } -} - -decl_storage! { - pub trait Store for Module as Staking { - pub SlotStake: BalanceOf; - - /// The currently elected validator set keyed by stash account ID. - pub CurrentElected: Vec; - - /// The start of the current era. - pub CurrentEraStart: MomentOf; - - /// The session index at which the current era started. - pub CurrentEraStartSessionIndex: SessionIndex; - - /// Rewards for the current era. Using indices of current elected set. - pub CurrentEraPointsEarned: EraPoints; - - /// Nominators for a particular account that is in action right now. You can't iterate - /// through validators here, but you can find them in the Session module. - /// - /// This is keyed by the stash account. - pub Stakers: map hasher(blake2_256) T::AccountId => Exposure>; - - /// Old upgrade flag. - pub IsUpgraded: bool; - } -} diff --git a/frame/staking/src/migration/mod.rs b/frame/staking/src/migration/mod.rs deleted file mode 100644 index 971e409189188654b79df2029c5857049e0b8f25..0000000000000000000000000000000000000000 --- a/frame/staking/src/migration/mod.rs +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
-
-//! Update storage from v1.0.0 to v2.0.0
-//!
-//! In the old version the staking module had several issues with handling session delay; the
-//! current era was always considered the active one.
-//!
-//! After the migration the current era will still be considered the active one for the era of
-//! the upgrade. And the delay issue will be fixed when planning the next era.
-// * create:
-// * ActiveEraStart
-// * ErasRewardPoints
-// * ActiveEra
-// * ErasStakers
-// * ErasStakersClipped
-// * ErasValidatorPrefs
-// * ErasTotalStake
-// * ErasStartSessionIndex
-// * translate StakingLedger
-// * removal of:
-// * Stakers
-// * SlotStake
-// * CurrentElected
-// * CurrentEraStart
-// * CurrentEraStartSessionIndex
-// * CurrentEraPointsEarned
-
-use super::*;
-mod deprecated;
-#[cfg(test)]
-mod tests;
-#[cfg(test)]
-mod test_upgrade_from_master_dataset;
-
-pub fn on_runtime_upgrade<T: Trait>() {
-	match StorageVersion::get() {
-		Releases::V2_0_0 => return,
-		Releases::V1_0_0 => upgrade_v1_to_v2::<T>(),
-	}
-}
-
-fn upgrade_v1_to_v2<T: Trait>() {
-	deprecated::IsUpgraded::kill();
-
-	let current_era_start_index = deprecated::CurrentEraStartSessionIndex::get();
-	let current_era = <Module<T> as Store>::CurrentEra::get().unwrap_or(0);
-	let current_era_start = deprecated::CurrentEraStart::<T>::get();
-	<Module<T> as Store>::ErasStartSessionIndex::insert(current_era, current_era_start_index);
-	<Module<T> as Store>::ActiveEra::put(ActiveEraInfo {
-		index: current_era,
-		start: Some(current_era_start),
-	});
-
-	let current_elected = deprecated::CurrentElected::<T>::get();
-	let mut current_total_stake = <BalanceOf<T>>::zero();
-	for validator in &current_elected {
-		let exposure = deprecated::Stakers::<T>::get(validator);
-		current_total_stake += exposure.total;
-		<Module<T> as Store>::ErasStakers::insert(current_era, validator, &exposure);
-
-		let mut exposure_clipped = exposure;
-		let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize;
-		if exposure_clipped.others.len() > clipped_max_len {
-			exposure_clipped.others.sort_unstable_by(|a, b| a.value.cmp(&b.value).reverse());
-			exposure_clipped.others.truncate(clipped_max_len);
-		}
-		<Module<T> as Store>::ErasStakersClipped::insert(current_era, validator, exposure_clipped);
-
-		let pref = <Module<T> as Store>::Validators::get(validator);
-		<Module<T> as Store>::ErasValidatorPrefs::insert(current_era, validator, pref);
-	}
-	<Module<T> as Store>::ErasTotalStake::insert(current_era, current_total_stake);
-
-	let points = deprecated::CurrentEraPointsEarned::get();
-	<Module<T> as Store>::ErasRewardPoints::insert(current_era, EraRewardPoints {
-		total: points.total,
-		individual: current_elected.iter().cloned().zip(points.individual.iter().cloned()).collect(),
-	});
-
-	let res = <Module<T> as Store>::Ledger::translate_values(
-		|old: deprecated::OldStakingLedger<T::AccountId, BalanceOf<T>>| StakingLedger {
-			stash: old.stash,
-			total: old.total,
-			active: old.active,
-			unlocking: old.unlocking,
-			last_reward: None,
-		}
-	);
-	if let Err(e) = res {
-		frame_support::print("Encountered error in migration of Staking::Ledger map.");
-		frame_support::print("The number of removed key/value is:");
-		frame_support::print(e);
-	}
-
-
-	// Kill old storages
-	deprecated::Stakers::<T>::remove_all();
-	deprecated::SlotStake::<T>::kill();
-	deprecated::CurrentElected::<T>::kill();
-	deprecated::CurrentEraStart::<T>::kill();
-	deprecated::CurrentEraStartSessionIndex::kill();
-	deprecated::CurrentEraPointsEarned::kill();
-
-	StorageVersion::put(Releases::V2_0_0);
-}
diff --git a/frame/staking/src/migration/test_upgrade_from_master_dataset.rs b/frame/staking/src/migration/test_upgrade_from_master_dataset.rs
deleted file mode 100644
index 32f9b0a3edb968210f8a49957c26efe2ff4676a7..0000000000000000000000000000000000000000
--- a/frame/staking/src/migration/test_upgrade_from_master_dataset.rs
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2020-2020 Parity Technologies (UK) Ltd.
-// This file is part of Substrate.
-
-// Substrate is free software: you can redistribute it and/or modify
-// it under the terms of the GNU General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-
-// Substrate is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU General Public License for more details.
-
-// You should have received a copy of the GNU General Public License
-// along with Substrate. If not, see <http://www.gnu.org/licenses/>.
-
-//! Data set for testing update from previous staking module.
-//!
-//! Each data set corresponds to the storage state for its corresponding block just before the
-//! staking initialisation.
-//!
-//! It has been generated using the following code at commit
-//! dc92587bea4032e0a0fc96785bfd9aa17c95459e
-//!
-//! ```nocompile
-//! fn print_storage(i: u32) {
-//! 	let mut storage = vec![];
-//! 	let mut current_key = vec![];
-//! 	while let Some(key) = sp_io::storage::next_key(&current_key) {
-//! 		storage.push((key.clone(), sp_io::storage::get(&key).unwrap()));
-//! 		current_key = key;
-//! 	}
-//! 	println!("const _{}: &[(&[u8], &[u8])] = {:?};", i, storage);
-//! }
-//!
-//! #[test]
-//! fn get_states() {
-//! 	let mut ext = ExtBuilder::default().build();
-//!
-//! 	for index in 1..10u32 {
-//! 		ext.execute_with(|| {
-//! 			print_storage(index - 1);
-//! 			System::set_block_number((index).into());
-//! 			Timestamp::set_timestamp(System::block_number() * 1000);
-//! 			Session::on_initialize(System::block_number());
-//! 		});
-//! 	}
-//! }
-//!
``` - -pub const _0: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[15, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 
232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 
14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 
219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 
172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[4, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 
72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 
250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 2, 0, 0, 0])]; -pub const _1: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[16, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 
78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 
0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 
75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 
116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 
190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[4, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 
8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[1, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 
201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 3, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[232, 3, 0, 0, 0, 0, 0, 0])]; -pub const _2: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[2, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[17, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 
57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 
99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 
12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 
22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[4, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 
198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 
110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 4, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[208, 7, 0, 0, 0, 0, 0, 0])]; -pub const _3: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[3, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 
175, 56, 80], &[19, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 
155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[1, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 
23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 
0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[184, 11, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 
125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[3, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 
73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 5, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[184, 11, 0, 0, 0, 0, 0, 0])]; -pub const _4: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[4, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 
190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[20, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 
134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 
0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[1, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 
207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 
178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[184, 11, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 
11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 
110, 230], &[4, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], 
&[0, 0, 0, 0, 6, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[160, 15, 0, 0, 0, 0, 0, 0])]; -pub const _5: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[5, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[21, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 
224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 
239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[1, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 
198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 
125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[184, 11, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 
232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 
127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[5, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 
208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 244, 170, 194, 251, 227, 63, 3, 85, 75, 254, 181, 89, 234, 38, 144, 237, 133, 33, 202, 164, 190, 150, 30, 97, 201, 26, 201, 161, 83, 13, 206, 122], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 7, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[136, 19, 0, 0, 0, 0, 0, 0])]; -pub const _6: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[6, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[23, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[92, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 
100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), 
(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 
32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 
236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[112, 23, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[12, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 
194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[6, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 91, 143, 41, 219, 118, 207, 78, 103, 110, 79, 201, 177, 112, 64, 49, 45, 235, 237, 175, 205, 86, 55, 251, 60, 123, 173, 210, 205, 220, 230, 164, 69], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 
50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 244, 170, 194, 251, 227, 63, 3, 85, 75, 254, 181, 89, 234, 38, 144, 237, 133, 33, 202, 164, 190, 150, 30, 97, 201, 26, 201, 161, 83, 13, 206, 122], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 8, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[112, 23, 0, 0, 0, 0, 0, 0])]; -pub const _7: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[7, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[24, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 
94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 
178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 
182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[112, 23, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[12, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 
196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[7, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 
157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 91, 143, 41, 219, 118, 207, 78, 103, 110, 79, 201, 177, 112, 64, 49, 45, 235, 237, 175, 205, 86, 55, 251, 60, 123, 173, 210, 205, 220, 230, 164, 69], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 157, 255, 135, 106, 75, 148, 45, 10, 151, 17, 209, 130, 33, 137, 143, 17, 202, 57, 117, 21, 137, 235, 244, 212, 157, 116, 159, 107, 62, 73, 50, 146], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 
84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 244, 170, 194, 251, 227, 63, 3, 85, 75, 254, 181, 89, 234, 38, 144, 237, 133, 33, 202, 164, 190, 150, 30, 97, 201, 26, 201, 161, 83, 13, 206, 122], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 9, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[88, 27, 0, 0, 0, 0, 0, 0])]; -pub const _8: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[25, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 
161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 
57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 
168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 
202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[112, 23, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[12, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 
192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[8, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 47, 1, 107, 122, 93, 185, 48, 218, 189, 234, 3, 170, 104, 210, 115, 77, 47, 164, 122, 5, 87, 226, 13, 19, 12, 193, 224, 68, 248, 220, 87, 150], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 91, 143, 41, 219, 118, 207, 78, 103, 110, 79, 201, 177, 
112, 64, 49, 45, 235, 237, 175, 205, 86, 55, 251, 60, 123, 173, 210, 205, 220, 230, 164, 69], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 157, 255, 135, 106, 75, 148, 45, 10, 151, 17, 209, 130, 33, 137, 143, 17, 202, 57, 117, 21, 137, 235, 244, 212, 157, 116, 159, 107, 62, 73, 50, 146], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 244, 170, 194, 251, 227, 63, 3, 85, 75, 254, 181, 89, 234, 38, 144, 237, 133, 33, 202, 164, 190, 150, 30, 97, 201, 26, 201, 161, 83, 13, 206, 122], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 10, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 
132, 224, 241, 214, 4, 93, 252, 187], &[64, 31, 0, 0, 0, 0, 0, 0])]; diff --git a/frame/staking/src/migration/tests.rs b/frame/staking/src/migration/tests.rs deleted file mode 100644 index db437e08c1873d0ca2b3a890b4625edd0267d592..0000000000000000000000000000000000000000 --- a/frame/staking/src/migration/tests.rs +++ /dev/null @@ -1,220 +0,0 @@ -use crate::*; -use crate::mock::*; -use frame_support::storage::migration::*; -use sp_core::hashing::blake2_256; -use super::test_upgrade_from_master_dataset; -use sp_runtime::traits::OnRuntimeUpgrade; - -#[test] -fn upgrade_works() { - ExtBuilder::default().build().execute_with(|| { - start_era(3); - - assert_eq!(Session::validators(), vec![21, 11]); - - // Insert fake data to check the migration - put_storage_value::>(b"Staking", b"CurrentElected", b"", vec![21, 31]); - put_storage_value::(b"Staking", b"CurrentEraStartSessionIndex", b"", 5); - put_storage_value::>(b"Staking", b"CurrentEraStart", b"", 777); - put_storage_value( - b"Staking", b"Stakers", &blake2_256(&11u64.encode()), - Exposure:: { - total: 10, - own: 10, - others: vec![], - } - ); - put_storage_value( - b"Staking", b"Stakers", &blake2_256(&21u64.encode()), - Exposure:: { - total: 20, - own: 20, - others: vec![], - } - ); - put_storage_value( - b"Staking", b"Stakers", &blake2_256(&31u64.encode()), - Exposure:: { - total: 30, - own: 30, - others: vec![], - } - ); - put_storage_value::<(u32, Vec)>(b"Staking", b"CurrentEraPointsEarned", b"", (12, vec![2, 10])); - ::ErasStakers::remove_all(); - ::ErasStakersClipped::remove_all(); - - ::StorageVersion::put(Releases::V1_0_0); - - // Perform upgrade - Staking::on_runtime_upgrade(); - - assert_eq!(::StorageVersion::get(), Releases::V2_0_0); - - // Check migration - assert_eq!(::ErasStartSessionIndex::get(3).unwrap(), 5); - assert_eq!(::ErasRewardPoints::get(3), EraRewardPoints { - total: 12, - individual: vec![(21, 2), (31, 10)].into_iter().collect(), - }); - assert_eq!(::ActiveEra::get().unwrap().index, 3); - assert_eq!(::ActiveEra::get().unwrap().start, Some(777)); - assert_eq!(::CurrentEra::get().unwrap(), 3); - assert_eq!(::ErasStakers::get(3, 11), Exposure { - total: 0, - own: 0, - others: vec![], - }); - assert_eq!(::ErasStakers::get(3, 21), Exposure { - total: 20, - own: 20, - others: vec![], - }); - assert_eq!(::ErasStakers::get(3, 31), Exposure { - total: 30, - own: 30, - others: vec![], - }); - assert_eq!(::ErasStakersClipped::get(3, 11), Exposure { - total: 0, - own: 0, - others: vec![], - }); - assert_eq!(::ErasStakersClipped::get(3, 21), Exposure { - total: 20, - own: 20, - others: vec![], - }); - assert_eq!(::ErasStakersClipped::get(3, 31), Exposure { - total: 30, - own: 30, - others: vec![], - }); - assert_eq!(::ErasValidatorPrefs::get(3, 21), Staking::validators(21)); - assert_eq!(::ErasValidatorPrefs::get(3, 31), Staking::validators(31)); - assert_eq!(::ErasTotalStake::get(3), 50); - }) -} - -// Test that an upgrade from previous test environment works. 
-#[test] -fn test_upgrade_from_master_works() { - let data_sets = &[ - test_upgrade_from_master_dataset::_0, - test_upgrade_from_master_dataset::_1, - test_upgrade_from_master_dataset::_2, - test_upgrade_from_master_dataset::_3, - test_upgrade_from_master_dataset::_4, - test_upgrade_from_master_dataset::_5, - test_upgrade_from_master_dataset::_6, - test_upgrade_from_master_dataset::_7, - test_upgrade_from_master_dataset::_8, - ]; - for data_set in data_sets.iter() { - let mut storage = sp_runtime::Storage::default(); - for (key, value) in data_set.iter() { - storage.top.insert(key.to_vec(), value.to_vec()); - } - let mut ext = sp_io::TestExternalities::from(storage); - ext.execute_with(|| { - let old_stakers = - get_storage_value::>(b"Staking", b"CurrentElected", b"").unwrap(); - let old_staker_0 = old_stakers[0]; - let old_staker_1 = old_stakers[1]; - let old_current_era = - get_storage_value::(b"Staking", b"CurrentEra", b"").unwrap(); - let old_staker_0_exposure = get_storage_value::>( - b"Staking", b"Stakers", &blake2_256(&old_staker_0.encode()) - ).unwrap(); - let old_staker_1_exposure = get_storage_value::>( - b"Staking", b"Stakers", &blake2_256(&old_staker_1.encode()) - ).unwrap(); - let ( - old_era_points_earned_total, - old_era_points_earned_individual - ) = get_storage_value::<(u32, Vec)>(b"Staking", b"CurrentEraPointsEarned", b"") - .unwrap_or((0, vec![])); - - Staking::on_runtime_upgrade(); - assert!(::StorageVersion::get() == Releases::V2_0_0); - - // Check ActiveEra and CurrentEra - let active_era = Staking::active_era().unwrap().index; - let current_era = Staking::current_era().unwrap(); - assert!(current_era == active_era); - assert!(current_era == old_current_era); - - // Check ErasStartSessionIndex - let active_era_start = Staking::eras_start_session_index(active_era).unwrap(); - let current_era_start = Staking::eras_start_session_index(current_era).unwrap(); - let current_session_index = Session::current_index(); - assert!(current_era_start == active_era_start); - assert!(active_era_start <= current_session_index); - assert_eq!(::ErasStartSessionIndex::iter().count(), 1); - - // Check ErasStakers - assert_eq!(::ErasStakers::iter().count(), 2); - assert_eq!( - ::ErasStakers::get(current_era, old_staker_0), - old_staker_0_exposure - ); - assert_eq!( - ::ErasStakers::get(current_era, old_staker_1), - old_staker_1_exposure - ); - - // Check ErasStakersClipped - assert_eq!(::ErasStakersClipped::iter().count(), 2); - assert!(::ErasStakersClipped::iter().all(|exposure_clipped| { - let max = ::MaxNominatorRewardedPerValidator::get() as usize; - exposure_clipped.others.len() <= max - })); - assert_eq!( - ::ErasStakersClipped::get(current_era, old_staker_0), - old_staker_0_exposure - ); - assert_eq!( - ::ErasStakersClipped::get(current_era, old_staker_1), - old_staker_1_exposure - ); - - // Check ErasValidatorPrefs - assert_eq!(::ErasValidatorPrefs::iter().count(), 2); - assert_eq!( - ::ErasValidatorPrefs::get(current_era, old_staker_0), - Staking::validators(old_staker_0) - ); - assert_eq!( - ::ErasValidatorPrefs::get(current_era, old_staker_1), - Staking::validators(old_staker_1) - ); - - // Check ErasTotalStake - assert_eq!(::ErasTotalStake::iter().count(), 1); - assert_eq!( - ::ErasTotalStake::get(current_era), - old_staker_0_exposure.total + old_staker_1_exposure.total - ); - - // Check ErasRewardPoints - assert_eq!(::ErasRewardPoints::iter().count(), 1); - let mut individual = BTreeMap::new(); - if let Some(p) = old_era_points_earned_individual.get(0) { - 
individual.insert(old_staker_0, p.clone()); - } - if let Some(p) = old_era_points_earned_individual.get(1) { - individual.insert(old_staker_1, p.clone()); - } - assert_eq!( - ::ErasRewardPoints::get(current_era), - EraRewardPoints { - total: old_era_points_earned_total, - individual, - } - ); - - // Check ErasValidatorReward - assert_eq!(::ErasValidatorReward::iter().count(), 0); - }); - } -} diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index b7cae91bedfa5262748ead3f1974d8f9e4a36c34..6332486b65099fa2eb57b8235ba1cf971df06f48 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -17,58 +17,72 @@ //! Test utilities use std::{collections::{HashSet, HashMap}, cell::RefCell}; -use sp_runtime::{Perbill, KeyTypeId}; +use sp_runtime::Perbill; use sp_runtime::curve::PiecewiseLinear; -use sp_runtime::traits::{IdentityLookup, Convert, OpaqueKeys, OnInitialize, OnFinalize, SaturatedConversion}; -use sp_runtime::testing::{Header, UintAuthorityId}; +use sp_runtime::traits::{IdentityLookup, Convert, SaturatedConversion, Zero}; +use sp_runtime::testing::{Header, UintAuthorityId, TestXt}; use sp_staking::{SessionIndex, offence::{OffenceDetails, OnOffenceHandler}}; -use sp_core::{H256, crypto::key_types}; -use sp_io; +use sp_core::H256; use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, StorageLinkedMap, StorageValue, StorageMap, - StorageDoubleMap, - traits::{Currency, Get, FindAuthor}, + assert_ok, impl_outer_origin, parameter_types, impl_outer_dispatch, impl_outer_event, + StorageValue, StorageMap, StorageDoubleMap, IterableStorageMap, + traits::{Currency, Get, FindAuthor, OnFinalize, OnInitialize}, weights::Weight, }; -use crate::{ - EraIndex, GenesisConfig, Module, Trait, StakerStatus, ValidatorPrefs, RewardDestination, - Nominators, inflation, SessionInterface, Exposure, ErasStakers, ErasRewardPoints +use frame_system::offchain::TransactionSubmitter; +use sp_io; +use sp_phragmen::{ + build_support_map, evaluate_support, reduce, ExtendedBalance, StakedAssignment, PhragmenScore, }; +use crate::*; + +const INIT_TIMESTAMP: u64 = 30_000; /// The AccountId alias in this test module. -pub type AccountId = u64; -pub type BlockNumber = u64; -pub type Balance = u64; +pub(crate) type AccountId = u64; +pub(crate) type AccountIndex = u64; +pub(crate) type BlockNumber = u64; +pub(crate) type Balance = u64; /// Simple structure that exposes how u64 currency can be represented as... u64. pub struct CurrencyToVoteHandler; impl Convert for CurrencyToVoteHandler { - fn convert(x: u64) -> u64 { x } + fn convert(x: u64) -> u64 { + x + } } impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> u64 { x.saturated_into() } + fn convert(x: u128) -> u64 { + x.saturated_into() + } } thread_local! { static SESSION: RefCell<(Vec, HashSet)> = RefCell::new(Default::default()); + static SESSION_PER_ERA: RefCell = RefCell::new(3); static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); static SLASH_DEFER_DURATION: RefCell = RefCell::new(0); + static ELECTION_LOOKAHEAD: RefCell = RefCell::new(0); + static PERIOD: RefCell = RefCell::new(1); } -pub struct TestSessionHandler; -impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [KeyTypeId] = &[key_types::DUMMY]; +/// Another session handler struct to test on_disabled. 
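// Editorial sketch (not part of the patch): the mock above keeps tunable test
// parameters such as SESSION_PER_ERA, ELECTION_LOOKAHEAD and PERIOD in
// thread-local cells, and the `Get` parameter types defined further down read
// them back, so each `ExtBuilder` can reconfigure the runtime per test. A
// minimal, self-contained stand-in for that pattern (the `Get` trait here is a
// local stand-in for `frame_support::traits::Get`; all names are hypothetical):
use std::cell::RefCell;

pub trait Get<T> {
    fn get() -> T;
}

thread_local! {
    // Default mirrors the mock's three-sessions-per-era setting.
    static SESSIONS_PER_ERA: RefCell<u32> = RefCell::new(3);
}

pub struct SessionsPerEra;
impl Get<u32> for SessionsPerEra {
    fn get() -> u32 {
        SESSIONS_PER_ERA.with(|v| *v.borrow())
    }
}

fn set_sessions_per_era(n: u32) {
    SESSIONS_PER_ERA.with(|v| *v.borrow_mut() = n);
}

fn main() {
    // A builder-style setter tweaks the thread-local before externalities are built...
    set_sessions_per_era(6);
    // ...and the runtime parameter reads it back through `Get`.
    assert_eq!(SessionsPerEra::get(), 6);
}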
+pub struct OtherSessionHandler; +impl pallet_session::OneSessionHandler for OtherSessionHandler { + type Key = UintAuthorityId; - fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} + fn on_genesis_session<'a, I: 'a>(_: I) + where I: Iterator, AccountId: 'a {} - fn on_new_session( - _changed: bool, - validators: &[(AccountId, Ks)], - _queued_validators: &[(AccountId, Ks)], - ) { - SESSION.with(|x| - *x.borrow_mut() = (validators.iter().map(|x| x.0.clone()).collect(), HashSet::new()) - ); + fn on_new_session<'a, I: 'a>(_: bool, validators: I, _: I,) + where I: Iterator, AccountId: 'a + { + SESSION.with(|x| { + *x.borrow_mut() = ( + validators.map(|x| x.0.clone()).collect(), + HashSet::new(), + ) + }); } fn on_disabled(validator_index: usize) { @@ -80,6 +94,10 @@ impl pallet_session::SessionHandler for TestSessionHandler { } } +impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { + type Public = UintAuthorityId; +} + pub fn is_disabled(controller: AccountId) -> bool { let stash = Staking::ledger(&controller).unwrap().stash; SESSION.with(|d| d.borrow().1.contains(&stash)) @@ -92,6 +110,32 @@ impl Get for ExistentialDeposit { } } +pub struct SessionsPerEra; +impl Get for SessionsPerEra { + fn get() -> SessionIndex { + SESSION_PER_ERA.with(|v| *v.borrow()) + } +} +impl Get for SessionsPerEra { + fn get() -> BlockNumber { + SESSION_PER_ERA.with(|v| *v.borrow() as BlockNumber) + } +} + +pub struct ElectionLookahead; +impl Get for ElectionLookahead { + fn get() -> BlockNumber { + ELECTION_LOOKAHEAD.with(|v| *v.borrow()) + } +} + +pub struct Period; +impl Get for Period { + fn get() -> BlockNumber { + PERIOD.with(|v| *v.borrow()) + } +} + pub struct SlashDeferDuration; impl Get for SlashDeferDuration { fn get() -> EraIndex { @@ -99,23 +143,47 @@ impl Get for SlashDeferDuration { } } -impl_outer_origin!{ +impl_outer_origin! { pub enum Origin for Test where system = frame_system {} } +impl_outer_dispatch! { + pub enum Call for Test where origin: Origin { + staking::Staking, + } +} + +mod staking { + // Re-export needed for `impl_outer_event!`. + pub use super::super::*; +} +use frame_system as system; +use pallet_balances as balances; +use pallet_session as session; + +impl_outer_event! { + pub enum MetaEvent for Test { + system, + balances, + session, + staking, + } +} + /// Author of block is always 11 pub struct Author11; impl FindAuthor for Author11 { fn find_author<'a, I>(_digests: I) -> Option - where I: 'a + IntoIterator + where I: 'a + IntoIterator, { Some(11) } } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. -#[derive(Clone, PartialEq, Eq, Debug)] +#[derive(Clone, Eq, PartialEq, Debug)] pub struct Test; + parameter_types! { pub const BlockHashCount: u64 = 250; pub const MaximumBlockWeight: Weight = 1024; @@ -124,15 +192,15 @@ parameter_types! 
{ } impl frame_system::Trait for Test { type Origin = Origin; - type Index = u64; + type Index = AccountIndex; type BlockNumber = BlockNumber; - type Call = (); + type Call = Call; type Hash = H256; type Hashing = ::sp_runtime::traits::BlakeTwo256; type AccountId = AccountId; type Lookup = IdentityLookup; type Header = Header; - type Event = (); + type Event = MetaEvent; type BlockHashCount = BlockHashCount; type MaximumBlockWeight = MaximumBlockWeight; type AvailableBlockRatio = AvailableBlockRatio; @@ -145,26 +213,31 @@ impl frame_system::Trait for Test { } impl pallet_balances::Trait for Test { type Balance = Balance; + type Event = MetaEvent; type DustRemoval = (); - type Event = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; } parameter_types! { - pub const Period: BlockNumber = 1; pub const Offset: BlockNumber = 0; pub const UncleGenerations: u64 = 0; pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(25); } +sp_runtime::impl_opaque_keys! { + pub struct SessionKeys { + pub other: OtherSessionHandler, + } +} impl pallet_session::Trait for Test { - type Event = (); + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions; + type SessionHandler = (OtherSessionHandler,); + type Event = MetaEvent; type ValidatorId = AccountId; type ValidatorIdOf = crate::StashOf; - type ShouldEndSession = pallet_session::PeriodicSessions; - type SessionManager = pallet_session::historical::NoteHistoricalRoot; - type SessionHandler = TestSessionHandler; - type Keys = UintAuthorityId; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = pallet_session::PeriodicSessions; } impl pallet_session::historical::Trait for Test { @@ -196,17 +269,18 @@ pallet_staking_reward_curve::build! { ); } parameter_types! 
{ - pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const UnsignedPriority: u64 = 1 << 20; } + impl Trait for Test { - type Currency = pallet_balances::Module; - type Time = pallet_timestamp::Module; + type Currency = Balances; + type UnixTime = Timestamp; type CurrencyToVote = CurrencyToVoteHandler; type RewardRemainder = (); - type Event = (); + type Event = MetaEvent; type Slash = (); type Reward = (); type SessionsPerEra = SessionsPerEra; @@ -215,11 +289,22 @@ impl Trait for Test { type BondingDuration = BondingDuration; type SessionInterface = Self; type RewardCurve = RewardCurve; + type NextNewSession = Session; + type ElectionLookahead = ElectionLookahead; + type Call = Call; + type SubmitTransaction = SubmitTransaction; type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type UnsignedPriority = UnsignedPriority; } +pub type Extrinsic = TestXt; +type SubmitTransaction = TransactionSubmitter<(), Test, Extrinsic>; + pub struct ExtBuilder { - existential_deposit: u64, + session_length: BlockNumber, + election_lookahead: BlockNumber, + session_per_era: SessionIndex, + existential_deposit: Balance, validator_pool: bool, nominate: bool, validator_count: u32, @@ -227,12 +312,16 @@ pub struct ExtBuilder { slash_defer_duration: EraIndex, fair: bool, num_validators: Option, - invulnerables: Vec, + invulnerables: Vec, + has_stakers: bool, } impl Default for ExtBuilder { fn default() -> Self { Self { + session_length: 1, + election_lookahead: 0, + session_per_era: 3, existential_deposit: 1, validator_pool: false, nominate: true, @@ -242,6 +331,7 @@ impl Default for ExtBuilder { fair: true, num_validators: None, invulnerables: vec![], + has_stakers: true, } } } @@ -283,13 +373,40 @@ impl ExtBuilder { self.invulnerables = invulnerables; self } - pub fn set_associated_consts(&self) { + pub fn session_per_era(mut self, length: SessionIndex) -> Self { + self.session_per_era = length; + self + } + pub fn election_lookahead(mut self, look: BlockNumber) -> Self { + self.election_lookahead = look; + self + } + pub fn session_length(mut self, length: BlockNumber) -> Self { + self.session_length = length; + self + } + pub fn has_stakers(mut self, has: bool) -> Self { + self.has_stakers = has; + self + } + pub fn offchain_phragmen_ext(self) -> Self { + self.session_per_era(4) + .session_length(5) + .election_lookahead(3) + } + pub fn set_associated_constants(&self) { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); SLASH_DEFER_DURATION.with(|v| *v.borrow_mut() = self.slash_defer_duration); + SESSION_PER_ERA.with(|v| *v.borrow_mut() = self.session_per_era); + ELECTION_LOOKAHEAD.with(|v| *v.borrow_mut() = self.election_lookahead); + PERIOD.with(|v| *v.borrow_mut() = self.session_length); } pub fn build(self) -> sp_io::TestExternalities { - self.set_associated_consts(); - let mut storage = frame_system::GenesisConfig::default().build_storage::().unwrap(); + let _ = env_logger::try_init(); + self.set_associated_constants(); + let mut storage = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); let balance_factor = if self.existential_deposit > 1 { 256 } else { @@ -301,7 +418,7 @@ impl ExtBuilder { .map(|x| ((x + 1) * 10 + 1) as u64) .collect::>(); - let _ = pallet_balances::GenesisConfig::{ + let _ = pallet_balances::GenesisConfig:: { balances: vec![ (1, 10 * balance_factor), 
(2, 20 * balance_factor), @@ -322,16 +439,17 @@ impl ExtBuilder { ], }.assimilate_storage(&mut storage); - let stake_21 = if self.fair { 1000 } else { 2000 }; - let stake_31 = if self.validator_pool { balance_factor * 1000 } else { 1 }; - let status_41 = if self.validator_pool { - StakerStatus::::Validator - } else { - StakerStatus::::Idle - }; - let nominated = if self.nominate { vec![11, 21] } else { vec![] }; - let _ = GenesisConfig::{ - stakers: vec![ + let mut stakers = vec![]; + if self.has_stakers { + let stake_21 = if self.fair { 1000 } else { 2000 }; + let stake_31 = if self.validator_pool { balance_factor * 1000 } else { 1 }; + let status_41 = if self.validator_pool { + StakerStatus::::Validator + } else { + StakerStatus::::Idle + }; + let nominated = if self.nominate { vec![11, 21] } else { vec![] }; + stakers = vec![ // (stash, controller, staked_amount, status) (11, 10, balance_factor * 1000, StakerStatus::::Validator), (21, 20, stake_21, StakerStatus::::Validator), @@ -339,25 +457,40 @@ impl ExtBuilder { (41, 40, balance_factor * 1000, status_41), // nominator (101, 100, balance_factor * 500, StakerStatus::::Nominator(nominated)) - ], + ]; + } + let _ = GenesisConfig::{ + stakers: stakers, validator_count: self.validator_count, minimum_validator_count: self.minimum_validator_count, invulnerables: self.invulnerables, slash_reward_fraction: Perbill::from_percent(10), ..Default::default() - }.assimilate_storage(&mut storage); + } + .assimilate_storage(&mut storage); let _ = pallet_session::GenesisConfig:: { - keys: validators.iter().map(|x| (*x, *x, UintAuthorityId(*x))).collect(), + keys: validators.iter().map(|x| ( + *x, + *x, + SessionKeys { other: UintAuthorityId(*x) } + )).collect(), }.assimilate_storage(&mut storage); let mut ext = sp_io::TestExternalities::from(storage); ext.execute_with(|| { let validators = Session::validators(); - SESSION.with(|x| - *x.borrow_mut() = (validators.clone(), HashSet::new()) - ); + SESSION.with(|x| *x.borrow_mut() = (validators.clone(), HashSet::new())); }); + + // We consider all test to start after timestamp is initialized + // This must be ensured by having `timestamp::on_initialize` called before + // `staking::on_initialize` + ext.execute_with(|| { + System::set_block_number(1); + Timestamp::set_timestamp(INIT_TIMESTAMP); + }); + ext } } @@ -368,20 +501,25 @@ pub type Session = pallet_session::Module; pub type Timestamp = pallet_timestamp::Module; pub type Staking = Module; +pub fn active_era() -> EraIndex { + Staking::active_era().unwrap().index +} + pub fn check_exposure_all(era: EraIndex) { ErasStakers::::iter_prefix(era).for_each(check_exposure) } pub fn check_nominator_all(era: EraIndex) { - >::enumerate() + >::iter() .for_each(|(acc, _)| check_nominator_exposure(era, acc)); } /// Check for each selected validator: expo.total = Sum(expo.other) + expo.own pub fn check_exposure(expo: Exposure) { assert_eq!( - expo.total as u128, expo.own as u128 + expo.others.iter().map(|e| e.value as u128).sum::(), - "wrong total exposure {:?}", expo, + expo.total as u128, + expo.own as u128 + expo.others.iter().map(|e| e.value as u128).sum::(), + "wrong total exposure", ); } @@ -390,17 +528,18 @@ pub fn check_exposure(expo: Exposure) { pub fn check_nominator_exposure(era: EraIndex, stash: AccountId) { assert_is_stash(stash); let mut sum = 0; - ErasStakers::::iter_prefix(era) - .for_each(|exposure| { - exposure.others.iter() - .filter(|i| i.who == stash) - .for_each(|i| sum += i.value) - }); + Session::validators() + .iter() + .map(|v| 
Staking::eras_stakers(era, v)) + .for_each(|e| e.others.iter().filter(|i| i.who == stash).for_each(|i| sum += i.value)); let nominator_stake = Staking::slashable_balance_of(&stash); // a nominator cannot over-spend. assert!( nominator_stake >= sum, - "failed: Nominator({}) stake({}) >= sum divided({})", stash, nominator_stake, sum, + "failed: Nominator({}) stake({}) >= sum divided({})", + stash, + nominator_stake, + sum, ); } @@ -416,20 +555,43 @@ pub fn assert_ledger_consistent(stash: AccountId) { assert_eq!(real_total, ledger.total); } -pub fn bond_validator(acc: u64, val: u64) { - // a = controller - // a + 1 = stash - let _ = Balances::make_free_balance_be(&(acc + 1), val); - assert_ok!(Staking::bond(Origin::signed(acc + 1), acc, val, RewardDestination::Controller)); - assert_ok!(Staking::validate(Origin::signed(acc), ValidatorPrefs::default())); -} - -pub fn bond_nominator(acc: u64, val: u64, target: Vec) { - // a = controller - // a + 1 = stash - let _ = Balances::make_free_balance_be(&(acc + 1), val); - assert_ok!(Staking::bond(Origin::signed(acc + 1), acc, val, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(acc), target)); +pub fn bond_validator(stash: u64, ctrl: u64, val: u64) { + let _ = Balances::make_free_balance_be(&stash, val); + let _ = Balances::make_free_balance_be(&ctrl, val); + assert_ok!(Staking::bond( + Origin::signed(stash), + ctrl, + val, + RewardDestination::Controller, + )); + assert_ok!(Staking::validate( + Origin::signed(ctrl), + ValidatorPrefs::default() + )); +} + +pub fn bond_nominator(stash: u64, ctrl: u64, val: u64, target: Vec) { + let _ = Balances::make_free_balance_be(&stash, val); + let _ = Balances::make_free_balance_be(&ctrl, val); + assert_ok!(Staking::bond( + Origin::signed(stash), + ctrl, + val, + RewardDestination::Controller, + )); + assert_ok!(Staking::nominate(Origin::signed(ctrl), target)); +} + +pub fn run_to_block(n: BlockNumber) { + Staking::on_finalize(System::block_number()); + for b in System::block_number() + 1..=n { + System::set_block_number(b); + Session::on_initialize(b); + Staking::on_initialize(b); + if b != n { + Staking::on_finalize(System::block_number()); + } + } } pub fn advance_session() { @@ -438,19 +600,21 @@ pub fn advance_session() { } pub fn start_session(session_index: SessionIndex) { + assert_eq!(>::get(), 1, "start_session can only be used with session length 1."); for i in Session::current_index()..session_index { Staking::on_finalize(System::block_number()); System::set_block_number((i + 1).into()); - Timestamp::set_timestamp(System::block_number() * 1000); + Timestamp::set_timestamp(System::block_number() * 1000 + INIT_TIMESTAMP); Session::on_initialize(System::block_number()); + Staking::on_initialize(System::block_number()); } assert_eq!(Session::current_index(), session_index); } pub fn start_era(era_index: EraIndex) { - start_session((era_index * 3).into()); - assert_eq!(Staking::active_era().unwrap().index, era_index); + start_session((era_index * >::get()).into()); + assert_eq!(Staking::current_era().unwrap(), era_index); } pub fn current_total_payout_for_duration(duration: u64) -> u64 { @@ -463,33 +627,45 @@ pub fn current_total_payout_for_duration(duration: u64) -> u64 { } pub fn reward_all_elected() { - let rewards = ::SessionInterface::validators().into_iter() + let rewards = ::SessionInterface::validators() + .into_iter() .map(|v| (v, 1)); >::reward_by_ids(rewards) } pub fn validator_controllers() -> Vec { - Session::validators().into_iter().map(|s| 
Staking::bonded(&s).expect("no controller for validator")).collect() + Session::validators() + .into_iter() + .map(|s| Staking::bonded(&s).expect("no controller for validator")) + .collect() } pub fn on_offence_in_era( - offenders: &[OffenceDetails>], + offenders: &[OffenceDetails< + AccountId, + pallet_session::historical::IdentificationTuple, + >], slash_fraction: &[Perbill], era: EraIndex, ) { let bonded_eras = crate::BondedEras::get(); for &(bonded_era, start_session) in bonded_eras.iter() { if bonded_era == era { - Staking::on_offence(offenders, slash_fraction, start_session); - return + let _ = Staking::on_offence(offenders, slash_fraction, start_session).unwrap(); + return; } else if bonded_era > era { - break + break; } } if Staking::active_era().unwrap().index == era { - Staking::on_offence(offenders, slash_fraction, Staking::eras_start_session_index(era).unwrap()); + let _ = + Staking::on_offence( + offenders, + slash_fraction, + Staking::eras_start_session_index(era).unwrap() + ).unwrap(); } else { panic!("cannot slash in era {}", era); } @@ -503,8 +679,193 @@ pub fn on_offence_now( on_offence_in_era(offenders, slash_fraction, now) } +// winners will be chosen by simply their unweighted total backing stake. Nominator stake is +// distributed evenly. +pub fn horrible_phragmen_with_post_processing( + do_reduce: bool, +) -> (CompactAssignments, Vec, PhragmenScore) { + let mut backing_stake_of: BTreeMap = BTreeMap::new(); + + // self stake + >::iter().for_each(|(who, _p)| { + *backing_stake_of.entry(who).or_insert(Zero::zero()) += Staking::slashable_balance_of(&who) + }); + + // add nominator stuff + >::iter().for_each(|(who, nomination)| { + nomination.targets.iter().for_each(|v| { + *backing_stake_of.entry(*v).or_insert(Zero::zero()) += + Staking::slashable_balance_of(&who) + }) + }); + + // elect winners + let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); + sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); + let winners: Vec = sorted + .iter() + .cloned() + .take(Staking::validator_count() as usize) + .collect(); + + // create assignments + let mut staked_assignment: Vec> = Vec::new(); + >::iter().for_each(|(who, nomination)| { + let mut dist: Vec<(AccountId, ExtendedBalance)> = Vec::new(); + nomination.targets.iter().for_each(|v| { + if winners.iter().find(|w| *w == v).is_some() { + dist.push((*v, ExtendedBalance::zero())); + } + }); + + if dist.len() == 0 { + return; + } + + // assign real stakes. just split the stake. + let stake = Staking::slashable_balance_of(&who) as ExtendedBalance; + let mut sum: ExtendedBalance = Zero::zero(); + let dist_len = dist.len(); + { + dist.iter_mut().for_each(|(_, w)| { + let partial = stake / (dist_len as ExtendedBalance); + *w = partial; + sum += partial; + }); + } + + // assign the leftover to last. + { + let leftover = stake - sum; + let last = dist.last_mut().unwrap(); + last.1 += leftover; + } + + staked_assignment.push(StakedAssignment { + who, + distribution: dist, + }); + }); + + // Ensure that this result is worse than seq-phragmen. Otherwise, it should not have been used + // for testing. 
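// Editorial sketch (not part of the patch): the assignment-building loop above
// splits a nominator's slashable balance evenly across its winning targets and
// then pushes the integer-division leftover onto the last target, so the parts
// always sum back to the original stake. A self-contained illustration of that
// arithmetic (hypothetical helper, plain u128 instead of ExtendedBalance):
fn split_evenly(stake: u128, targets: usize) -> Vec<u128> {
    assert!(targets > 0, "cannot distribute stake over zero targets");
    let part = stake / targets as u128;
    let mut dist = vec![part; targets];
    // Assign the rounding remainder to the last share.
    *dist.last_mut().unwrap() += stake - part * targets as u128;
    dist
}

fn main() {
    let dist = split_evenly(1000, 3);
    assert_eq!(dist, vec![333, 333, 334]);
    assert_eq!(dist.iter().sum::<u128>(), 1000);
}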
+ let score = { + let (_, _, better_score) = prepare_submission_with(true, |_| {}); + + let support = build_support_map::(&winners, &staked_assignment).0; + let score = evaluate_support(&support); + + assert!(sp_phragmen::is_score_better(score, better_score)); + + score + }; + + if do_reduce { + reduce(&mut staked_assignment); + } + + let snapshot_validators = Staking::snapshot_validators().unwrap(); + let snapshot_nominators = Staking::snapshot_nominators().unwrap(); + let nominator_index = |a: &AccountId| -> Option { + snapshot_nominators.iter().position(|x| x == a).map(|i| i as NominatorIndex) + }; + let validator_index = |a: &AccountId| -> Option { + snapshot_validators.iter().position(|x| x == a).map(|i| i as ValidatorIndex) + }; + + // convert back to ratio assignment. This takes less space. + let assignments_reduced = + sp_phragmen::assignment_staked_to_ratio::(staked_assignment); + + let compact = + CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) + .unwrap(); + + // winner ids to index + let winners = winners.into_iter().map(|w| validator_index(&w).unwrap()).collect::>(); + + (compact, winners, score) +} + +// Note: this should always logically reproduce [`offchain_election::prepare_submission`], yet we +// cannot do it since we want to have `tweak` injected into the process. +pub fn prepare_submission_with( + do_reduce: bool, + tweak: impl FnOnce(&mut Vec>), +) -> (CompactAssignments, Vec, PhragmenScore) { + // run phragmen on the default stuff. + let sp_phragmen::PhragmenResult { + winners, + assignments, + } = Staking::do_phragmen::().unwrap(); + let winners = winners.into_iter().map(|(w, _)| w).collect::>(); + + let stake_of = |who: &AccountId| -> ExtendedBalance { + >::convert( + Staking::slashable_balance_of(&who) + ) as ExtendedBalance + }; + let mut staked = sp_phragmen::assignment_ratio_to_staked(assignments, stake_of); + + // apply custom tweaks. awesome for testing. + tweak(&mut staked); + + if do_reduce { + reduce(&mut staked); + } + + // convert back to ratio assignment. This takes less space. 
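// Editorial sketch (not part of the patch): both the test helper above and the
// offchain election code map account ids to compact indices by taking the
// account's position in the snapshot vector and narrowing it to the index
// type, yielding `None` for anything not in the snapshot. A self-contained
// mirror of that lookup (u64 accounts and u16 indices stand in for AccountId
// and ValidatorIndex/NominatorIndex):
use std::convert::TryInto;

fn index_of(snapshot: &[u64], who: u64) -> Option<u16> {
    snapshot
        .iter()
        .position(|x| *x == who)
        // Returns None if the position does not fit the narrower index type.
        .and_then(|i| i.try_into().ok())
}

fn main() {
    let snapshot_validators = vec![11u64, 21, 31];
    assert_eq!(index_of(&snapshot_validators, 21), Some(1));
    assert_eq!(index_of(&snapshot_validators, 99), None);
}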
+ let snapshot_validators = Staking::snapshot_validators().expect("snapshot not created."); + let snapshot_nominators = Staking::snapshot_nominators().expect("snapshot not created."); + let nominator_index = |a: &AccountId| -> Option { + snapshot_nominators + .iter() + .position(|x| x == a) + .map_or_else( + || { println!("unable to find nominator index for {:?}", a); None }, + |i| Some(i as NominatorIndex), + ) + }; + let validator_index = |a: &AccountId| -> Option { + snapshot_validators + .iter() + .position(|x| x == a) + .map_or_else( + || { println!("unable to find validator index for {:?}", a); None }, + |i| Some(i as ValidatorIndex), + ) + }; + + let assignments_reduced = sp_phragmen::assignment_staked_to_ratio(staked); + + // re-compute score by converting, yet again, into staked type + let score = { + let staked = sp_phragmen::assignment_ratio_to_staked( + assignments_reduced.clone(), + Staking::slashable_balance_of_extended, + ); + + let (support_map, _) = build_support_map::( + winners.as_slice(), + staked.as_slice(), + ); + evaluate_support::(&support_map) + }; + + let compact = + CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) + .map_err(|e| { println!("error in compact: {:?}", e); e }) + .expect("Failed to create compact"); + + + // winner ids to index + let winners = winners.into_iter().map(|w| validator_index(&w).unwrap()).collect::>(); + + (compact, winners, score) +} + /// Make all validator and nominator request their payment -pub fn make_all_reward_payment(era: EraIndex) { +pub fn make_all_reward_payment_before_migration(era: EraIndex) { let validators_with_reward = ErasRewardPoints::::get(era).individual.keys() .cloned() .collect::>(); @@ -534,3 +895,37 @@ pub fn make_all_reward_payment(era: EraIndex) { assert_ok!(Staking::payout_validator(Origin::signed(validator_controller), era)); } } + +/// Make all validator and nominator request their payment +pub fn make_all_reward_payment(era: EraIndex) { + let validators_with_reward = ErasRewardPoints::::get(era).individual.keys() + .cloned() + .collect::>(); + + // reward validators + for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { + let ledger = >::get(&validator_controller).unwrap(); + + assert_ok!(Staking::payout_stakers(Origin::signed(1337), ledger.stash, era)); + } +} + +#[macro_export] +macro_rules! assert_session_era { + ($session:expr, $era:expr) => { + assert_eq!( + Session::current_index(), + $session, + "wrong session {} != {}", + Session::current_index(), + $session, + ); + assert_eq!( + Staking::active_era().unwrap().index, + $era, + "wrong active era {} != {}", + Staking::active_era().unwrap().index, + $era, + ); + }; +} diff --git a/frame/staking/src/offchain_election.rs b/frame/staking/src/offchain_election.rs new file mode 100644 index 0000000000000000000000000000000000000000..0d4cf49f103b9fd634ad178235159244f4f743b2 --- /dev/null +++ b/frame/staking/src/offchain_election.rs @@ -0,0 +1,219 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helpers for offchain worker election. + +use crate::{ + Call, CompactAssignments, Module, NominatorIndex, OffchainAccuracy, Trait, ValidatorIndex, +}; +use frame_system::offchain::SubmitUnsignedTransaction; +use sp_phragmen::{ + build_support_map, evaluate_support, reduce, Assignment, ExtendedBalance, PhragmenResult, + PhragmenScore, +}; +use sp_runtime::offchain::storage::StorageValueRef; +use sp_runtime::PerThing; +use sp_runtime::RuntimeDebug; +use sp_std::{convert::TryInto, prelude::*}; + +/// Error types related to the offchain election machinery. +#[derive(RuntimeDebug)] +pub enum OffchainElectionError { + /// Phragmen election returned None. This means less candidate that minimum number of needed + /// validators were present. The chain is in trouble and not much that we can do about it. + ElectionFailed, + /// Submission to the transaction pool failed. + PoolSubmissionFailed, + /// The snapshot data is not available. + SnapshotUnavailable, + /// Error from phragmen crate. This usually relates to compact operation. + PhragmenError(sp_phragmen::Error), + /// One of the computed winners is invalid. + InvalidWinner, +} + +impl From for OffchainElectionError { + fn from(e: sp_phragmen::Error) -> Self { + Self::PhragmenError(e) + } +} + +/// Storage key used to store the persistent offchain worker status. +pub(crate) const OFFCHAIN_HEAD_DB: &[u8] = b"parity/staking-election/"; +/// The repeat threshold of the offchain worker. This means we won't run the offchain worker twice +/// within a window of 5 blocks. +pub(crate) const OFFCHAIN_REPEAT: u32 = 5; +/// Default number of blocks for which the unsigned transaction should stay in the pool +pub(crate) const DEFAULT_LONGEVITY: u64 = 25; + +/// Checks if an execution of the offchain worker is permitted at the given block number, or not. +/// +/// This essentially makes sure that we don't run on previous blocks in case of a re-org, and we +/// don't run twice within a window of length [`OFFCHAIN_REPEAT`]. +/// +/// Returns `Ok(())` if offchain worker should happen, `Err(reason)` otherwise. +pub(crate) fn set_check_offchain_execution_status( + now: T::BlockNumber, +) -> Result<(), &'static str> { + let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); + let threshold = T::BlockNumber::from(OFFCHAIN_REPEAT); + + let mutate_stat = + storage.mutate::<_, &'static str, _>(|maybe_head: Option>| { + match maybe_head { + Some(Some(head)) if now < head => Err("fork."), + Some(Some(head)) if now >= head && now <= head + threshold => { + Err("recently executed.") + } + Some(Some(head)) if now > head + threshold => { + // we can run again now. Write the new head. + Ok(now) + } + _ => { + // value doesn't exists. Probably this node just booted up. Write, and run + Ok(now) + } + } + }); + + match mutate_stat { + // all good + Ok(Ok(_)) => Ok(()), + // failed to write. + Ok(Err(_)) => Err("failed to write to offchain db."), + // fork etc. + Err(why) => Err(why), + } +} + +/// The internal logic of the offchain worker of this module. This runs the phragmen election, +/// compacts and reduces the solution, computes the score and submits it back to the chain as an +/// unsigned transaction, without any signature. +pub(crate) fn compute_offchain_election() -> Result<(), OffchainElectionError> { + // compute raw solution. Note that we use `OffchainAccuracy`. 
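// Editorial sketch (not part of the patch): `set_check_offchain_execution_status`
// above permits a new offchain run only when the current block is beyond the
// recorded head plus the repeat window, and treats a block number below the
// recorded head as a fork. A self-contained mirror of that decision on plain
// integers (hypothetical names; the real helper persists the head in the
// offchain database through `StorageValueRef`):
const OFFCHAIN_REPEAT: u64 = 5;

fn may_run(now: u64, recorded_head: Option<u64>) -> Result<(), &'static str> {
    match recorded_head {
        // Running on a block lower than the recorded head means a re-org happened.
        Some(head) if now < head => Err("fork."),
        // Still inside the repeat window after the last run.
        Some(head) if now <= head + OFFCHAIN_REPEAT => Err("recently executed."),
        // Far enough past the last run, or no record yet: run (and record `now`).
        _ => Ok(()),
    }
}

fn main() {
    assert!(may_run(10, None).is_ok());
    assert_eq!(may_run(8, Some(10)), Err("fork."));
    assert_eq!(may_run(12, Some(10)), Err("recently executed."));
    assert!(may_run(16, Some(10)).is_ok());
}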
+ let PhragmenResult { + winners, + assignments, + } = >::do_phragmen::() + .ok_or(OffchainElectionError::ElectionFailed)?; + + // process and prepare it for submission. + let (winners, compact, score) = prepare_submission::(assignments, winners, true)?; + + // defensive-only: active era can never be none except genesis. + let era = >::active_era().map(|e| e.index).unwrap_or_default(); + + // send it. + let call: ::Call = Call::submit_election_solution_unsigned( + winners, + compact, + score, + era, + ).into(); + + T::SubmitTransaction::submit_unsigned(call) + .map_err(|_| OffchainElectionError::PoolSubmissionFailed) +} + +/// Takes a phragmen result and spits out some data that can be submitted to the chain. +/// +/// This does a lot of stuff; read the inline comments. +pub fn prepare_submission( + assignments: Vec>, + winners: Vec<(T::AccountId, ExtendedBalance)>, + do_reduce: bool, +) -> Result<(Vec, CompactAssignments, PhragmenScore), OffchainElectionError> where + ExtendedBalance: From<::Inner>, +{ + // make sure that the snapshot is available. + let snapshot_validators = + >::snapshot_validators().ok_or(OffchainElectionError::SnapshotUnavailable)?; + let snapshot_nominators = + >::snapshot_nominators().ok_or(OffchainElectionError::SnapshotUnavailable)?; + + // all helper closures + let nominator_index = |a: &T::AccountId| -> Option { + snapshot_nominators + .iter() + .position(|x| x == a) + .and_then(|i| >::try_into(i).ok()) + }; + let validator_index = |a: &T::AccountId| -> Option { + snapshot_validators + .iter() + .position(|x| x == a) + .and_then(|i| >::try_into(i).ok()) + }; + + // Clean winners. + let winners = winners + .into_iter() + .map(|(w, _)| w) + .collect::>(); + + // convert into absolute value and to obtain the reduced version. + let mut staked = sp_phragmen::assignment_ratio_to_staked( + assignments, + >::slashable_balance_of_extended, + ); + + if do_reduce { + reduce(&mut staked); + } + + // Convert back to ratio assignment. This takes less space. + let low_accuracy_assignment = sp_phragmen::assignment_staked_to_ratio(staked); + + // convert back to staked to compute the score in the receiver's accuracy. This can be done + // nicer, for now we do it as such since this code is not time-critical. This ensure that the + // score _predicted_ here is the same as the one computed on chain and you will not get a + // `PhragmenBogusScore` error. This is totally NOT needed if we don't do reduce. This whole + // _accuracy glitch_ happens because reduce breaks that assumption of rounding and **scale**. + // The initial phragmen results are computed in `OffchainAccuracy` and the initial `staked` + // assignment set is also all multiples of this value. After reduce, this no longer holds. Hence + // converting to ratio thereafter is not trivially reversible. + let score = { + let staked = sp_phragmen::assignment_ratio_to_staked( + low_accuracy_assignment.clone(), + >::slashable_balance_of_extended, + ); + + let (support_map, _) = build_support_map::(&winners, &staked); + evaluate_support::(&support_map) + }; + + // compact encode the assignment. + let compact = CompactAssignments::from_assignment( + low_accuracy_assignment, + nominator_index, + validator_index, + ).map_err(|e| OffchainElectionError::from(e))?; + + // winners to index. Use a simple for loop for a more expressive early exit in case of error. 
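// Editorial sketch (not part of the patch): the long comment above recomputes
// the score from the ratio-encoded assignment because the round-trip
// staked -> ratio -> staked is lossy, so the score must be evaluated on exactly
// what the chain will decode. A tiny, self-contained illustration of that
// rounding loss with a parts-per-million ratio (hypothetical helpers):
fn to_ppm(part: u128, total: u128) -> u128 {
    part * 1_000_000 / total
}

fn from_ppm(ppm: u128, total: u128) -> u128 {
    ppm * total / 1_000_000
}

fn main() {
    let (part, total) = (1u128, 3u128);
    // 1/3 encodes as 333_333 ppm (rounded down)...
    let ppm = to_ppm(part, total);
    // ...and decoding gives 999_999 / 1_000_000 == 0, not the original 1.
    assert_ne!(from_ppm(ppm, total), part);
}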
+ let mut winners_indexed: Vec = Vec::with_capacity(winners.len()); + for w in winners { + if let Some(idx) = snapshot_validators.iter().position(|v| *v == w) { + let compact_index: ValidatorIndex = idx + .try_into() + .map_err(|_| OffchainElectionError::InvalidWinner)?; + winners_indexed.push(compact_index); + } else { + return Err(OffchainElectionError::InvalidWinner); + } + } + + Ok((winners_indexed, compact, score)) +} diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 3c8f39501a25e8c8b1fe2306c4d8ceb596b5e457..26f0828989d733bfc720de288f33f0efcafd462d 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -16,11 +16,11 @@ //! A slashing implementation for NPoS systems. //! -//! For the purposes of the economic model, it is easiest to think of each validator -//! of a nominator which nominates only its own identity. +//! For the purposes of the economic model, it is easiest to think of each validator as a nominator +//! which nominates only its own identity. //! -//! The act of nomination signals intent to unify economic identity with the validator - to take part in the -//! rewards of a job well done, and to take part in the punishment of a job done badly. +//! The act of nomination signals intent to unify economic identity with the validator - to take +//! part in the rewards of a job well done, and to take part in the punishment of a job done badly. //! //! There are 3 main difficulties to account for with slashing in NPoS: //! - A nominator can nominate multiple validators and be slashed via any of them. @@ -52,7 +52,7 @@ use super::{ EraIndex, Trait, Module, Store, BalanceOf, Exposure, Perbill, SessionInterface, NegativeImbalanceOf, UnappliedSlash, }; -use sp_runtime::{traits::{Zero, Saturating}, PerThing}; +use sp_runtime::{traits::{Zero, Saturating}, RuntimeDebug}; use frame_support::{ StorageMap, StorageDoubleMap, traits::{Currency, OnUnbalanced, Imbalance}, @@ -65,7 +65,7 @@ use codec::{Encode, Decode}; const REWARD_F1: Perbill = Perbill::from_percent(50); /// The index of a slashing span - unique to each stash. -pub(crate) type SpanIndex = u32; +pub type SpanIndex = u32; // A range of start..end eras for a slashing span. #[derive(Encode, Decode)] @@ -83,7 +83,7 @@ impl SlashingSpan { } /// An encoding of all of a nominator's slashing spans. -#[derive(Encode, Decode)] +#[derive(Encode, Decode, RuntimeDebug)] pub struct SlashingSpans { // the index of the current slashing span of the nominator. different for // every stash, resets when the account hits free balance 0. @@ -143,7 +143,7 @@ impl SlashingSpans { } /// Yields the era index where the most recent non-zero slash occurred. - pub(crate) fn last_nonzero_slash(&self) -> EraIndex { + pub fn last_nonzero_slash(&self) -> EraIndex { self.last_nonzero_slash } @@ -566,7 +566,7 @@ pub(crate) fn clear_stash_metadata(stash: &T::AccountId) { // apply the slash to a stash account, deducting any missing funds from the reward // payout, saturating at 0. this is mildly unfair but also an edge-case that // can only occur when overlapping locked funds have been slashed. -fn do_slash( +pub fn do_slash( stash: &T::AccountId, value: BalanceOf, reward_payout: &mut BalanceOf, diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..4c1ee66a75692c5883b0868c21d269d3002f5aeb --- /dev/null +++ b/frame/staking/src/testing_utils.rs @@ -0,0 +1,340 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Testing utils for staking. Needs the `testing-utils` feature to be enabled. +//! +//! Note that these helpers should NOT be used with the actual crate tests, but are rather designed +//! for when the module is being externally tested (i.e. fuzzing, benchmarking, e2e tests). Enabling +//! this feature in the current crate's Cargo.toml will leak all of this into a normal release +//! build. Just don't do it. + +use crate::*; +use codec::{Decode, Encode}; +use frame_support::assert_ok; +use frame_system::RawOrigin; +use pallet_indices::address::Address; +use rand::Rng; +use sp_core::hashing::blake2_256; +use sp_phragmen::{ + build_support_map, evaluate_support, reduce, Assignment, PhragmenScore, StakedAssignment, +}; + +const CTRL_PREFIX: u32 = 1000; +const NOMINATOR_PREFIX: u32 = 1_000_000; + +/// A dummy suer. +pub const USER: u32 = 999_999_999; + +/// Address type of the `T` +pub type AddressOf = Address<::AccountId, u32>; + +/// Random number in the range `[a, b]`. +pub fn random(a: u32, b: u32) -> u32 { + rand::thread_rng().gen_range(a, b) +} + +/// Set the desired validator count, with related storage items. +pub fn set_validator_count(to_elect: u32) { + ValidatorCount::put(to_elect); + MinimumValidatorCount::put(to_elect / 2); + >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); +} + +/// Build an account with the given index. +pub fn account(index: u32) -> T::AccountId { + let entropy = (b"benchmark/staking", index).using_encoded(blake2_256); + T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() +} + +/// Build an address given Index +pub fn address(index: u32) -> AddressOf { + pallet_indices::address::Address::Id(account::(index)) +} + +/// Generate signed origin from `who`. +pub fn signed(who: T::AccountId) -> T::Origin { + RawOrigin::Signed(who).into() +} + +/// Generate signed origin from `index`. +pub fn signed_account(index: u32) -> T::Origin { + signed::(account::(index)) +} + +/// Bond a validator. +pub fn bond_validator(stash: T::AccountId, ctrl: u32, val: BalanceOf) +where + T::Lookup: StaticLookup>, +{ + let _ = T::Currency::make_free_balance_be(&stash, val); + assert_ok!(>::bond( + signed::(stash), + address::(ctrl), + val, + RewardDestination::Controller + )); + assert_ok!(>::validate( + signed_account::(ctrl), + ValidatorPrefs::default() + )); +} + +pub fn bond_nominator( + stash: T::AccountId, + ctrl: u32, + val: BalanceOf, + target: Vec>, +) where + T::Lookup: StaticLookup>, +{ + let _ = T::Currency::make_free_balance_be(&stash, val); + assert_ok!(>::bond( + signed::(stash), + address::(ctrl), + val, + RewardDestination::Controller + )); + assert_ok!(>::nominate(signed_account::(ctrl), target)); +} + +/// Bond `nun_validators` validators and `num_nominator` nominators with `edge_per_voter` random +/// votes per nominator. 
+pub fn setup_chain_stakers(num_validators: u32, num_voters: u32, edge_per_voter: u32) +where + T::Lookup: StaticLookup>, +{ + (0..num_validators).for_each(|i| { + bond_validator::( + account::(i), + i + CTRL_PREFIX, + >::from(random(1, 1000)) * T::Currency::minimum_balance(), + ); + }); + + (0..num_voters).for_each(|i| { + let mut targets: Vec> = Vec::with_capacity(edge_per_voter as usize); + let mut all_targets = (0..num_validators) + .map(|t| address::(t)) + .collect::>(); + assert!(num_validators >= edge_per_voter); + (0..edge_per_voter).for_each(|_| { + let target = all_targets.remove(random(0, all_targets.len() as u32 - 1) as usize); + targets.push(target); + }); + bond_nominator::( + account::(i + NOMINATOR_PREFIX), + i + NOMINATOR_PREFIX + CTRL_PREFIX, + >::from(random(1, 1000)) * T::Currency::minimum_balance(), + targets, + ); + }); + + >::create_stakers_snapshot(); +} + +/// Build a _really bad_ but acceptable solution for election. This should always yield a solution +/// which has a less score than the seq-phragmen. +pub fn get_weak_solution( + do_reduce: bool, +) -> (Vec, CompactAssignments, PhragmenScore) { + let mut backing_stake_of: BTreeMap> = BTreeMap::new(); + + // self stake + >::enumerate().for_each(|(who, _p)| { + *backing_stake_of.entry(who.clone()).or_insert(Zero::zero()) += + >::slashable_balance_of(&who) + }); + + // add nominator stuff + >::enumerate().for_each(|(who, nomination)| { + nomination.targets.into_iter().for_each(|v| { + *backing_stake_of.entry(v).or_insert(Zero::zero()) += + >::slashable_balance_of(&who) + }) + }); + + // elect winners + let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); + sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); + let winners: Vec = sorted + .iter() + .cloned() + .take(>::validator_count() as usize) + .collect(); + + let mut staked_assignments: Vec> = Vec::new(); + >::enumerate().for_each(|(who, nomination)| { + let mut dist: Vec<(T::AccountId, ExtendedBalance)> = Vec::new(); + nomination.targets.into_iter().for_each(|v| { + if winners.iter().find(|&w| *w == v).is_some() { + dist.push((v, ExtendedBalance::zero())); + } + }); + + if dist.len() == 0 { + return; + } + + // assign real stakes. just split the stake. + let stake = , u64>>::convert( + >::slashable_balance_of(&who), + ) as ExtendedBalance; + + let mut sum: ExtendedBalance = Zero::zero(); + let dist_len = dist.len() as ExtendedBalance; + + // assign main portion + // only take the first half into account. This should highly imbalance stuff, which is good. + dist.iter_mut() + .take(if dist_len > 1 { + (dist_len as usize) / 2 + } else { + 1 + }) + .for_each(|(_, w)| { + let partial = stake / dist_len; + *w = partial; + sum += partial; + }); + + // assign the leftover to last. + let leftover = stake - sum; + let last = dist.last_mut().unwrap(); + last.1 += leftover; + + staked_assignments.push(StakedAssignment { + who, + distribution: dist, + }); + }); + + // add self support to winners. 
+ winners.iter().for_each(|w| { + staked_assignments.push(StakedAssignment { + who: w.clone(), + distribution: vec![( + w.clone(), + , u64>>::convert( + >::slashable_balance_of(&w), + ) as ExtendedBalance, + )], + }) + }); + + if do_reduce { + reduce(&mut staked_assignments); + } + + // helpers for building the compact + let snapshot_validators = >::snapshot_validators().unwrap(); + let snapshot_nominators = >::snapshot_nominators().unwrap(); + + let nominator_index = |a: &T::AccountId| -> Option { + snapshot_nominators + .iter() + .position(|x| x == a) + .and_then(|i| >::try_into(i).ok()) + }; + let validator_index = |a: &T::AccountId| -> Option { + snapshot_validators + .iter() + .position(|x| x == a) + .and_then(|i| >::try_into(i).ok()) + }; + let stake_of = |who: &T::AccountId| -> ExtendedBalance { + , u64>>::convert( + >::slashable_balance_of(who), + ) as ExtendedBalance + }; + + // convert back to ratio assignment. This takes less space. + let low_accuracy_assignment: Vec> = + staked_assignments + .into_iter() + .map(|sa| sa.into_assignment(true)) + .collect(); + + // re-calculate score based on what the chain will decode. + let score = { + let staked: Vec> = low_accuracy_assignment + .iter() + .map(|a| { + let stake = stake_of(&a.who); + a.clone().into_staked(stake, true) + }) + .collect(); + + let (support_map, _) = + build_support_map::(winners.as_slice(), staked.as_slice()); + evaluate_support::(&support_map) + }; + + // compact encode the assignment. + let compact = CompactAssignments::from_assignment( + low_accuracy_assignment, + nominator_index, + validator_index, + ) + .unwrap(); + + // winners to index. + let winners = winners + .into_iter() + .map(|w| { + snapshot_validators + .iter() + .position(|v| *v == w) + .unwrap() + .try_into() + .unwrap() + }) + .collect::>(); + + (winners, compact, score) +} + +/// Create a solution for seq-phragmen. This uses the same internal function as used by the offchain +/// worker code. +pub fn get_seq_phragmen_solution( + do_reduce: bool, +) -> (Vec, CompactAssignments, PhragmenScore) { + let sp_phragmen::PhragmenResult { + winners, + assignments, + } = >::do_phragmen::().unwrap(); + + offchain_election::prepare_submission::(assignments, winners, do_reduce).unwrap() +} + +/// Remove all validator, nominators, votes and exposures. 
+pub fn clean(era: EraIndex) + where + ::AccountId: codec::EncodeLike, + u32: codec::EncodeLike, +{ + >::enumerate().for_each(|(k, _)| { + let ctrl = >::bonded(&k).unwrap(); + >::remove(&k); + >::remove(&k); + >::remove(&ctrl); + >::remove(k, era); + }); + >::enumerate().for_each(|(k, _)| >::remove(k)); + >::remove_all(); + >::remove_all(); + >::kill(); + QueuedScore::kill(); +} diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 00a8f854dbb2a9f55a2410e59792df97b4f3fa77..75c4edae22804e18f3509a88bd3083080755f113 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -18,12 +18,13 @@ use super::*; use mock::*; -use sp_runtime::{assert_eq_error_rate, traits::{OnInitialize, BadOrigin}}; +use sp_runtime::{ + assert_eq_error_rate, traits::BadOrigin, +}; use sp_staking::offence::OffenceDetails; use frame_support::{ - assert_ok, assert_noop, - traits::{Currency, ReservableCurrency}, - StorageMap, + assert_ok, assert_noop, StorageMap, + traits::{Currency, ReservableCurrency, OnInitialize}, }; use pallet_balances::Error as BalancesError; use substrate_test_utils::assert_eq_uvec; @@ -65,18 +66,18 @@ fn basic_setup_works() { // Account 10 controls the stash from account 11, which is 100 * balance_factor units assert_eq!( Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], last_reward: None }) + Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![] }) ); // Account 20 controls the stash from account 21, which is 200 * balance_factor units assert_eq!( Staking::ledger(&20), - Some(StakingLedger { stash: 21, total: 1000, active: 1000, unlocking: vec![], last_reward: None }) + Some(StakingLedger { stash: 21, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![] }) ); // Account 1 does not control any stash assert_eq!(Staking::ledger(&1), None); // ValidatorPrefs are default - assert_eq!(>::enumerate().collect::>(), vec![ + assert_eq!(>::iter().collect::>(), vec![ (31, ValidatorPrefs::default()), (21, ValidatorPrefs::default()), (11, ValidatorPrefs::default()) @@ -84,7 +85,7 @@ fn basic_setup_works() { assert_eq!( Staking::ledger(100), - Some(StakingLedger { stash: 101, total: 500, active: 500, unlocking: vec![], last_reward: None }) + Some(StakingLedger { stash: 101, total: 500, active: 500, unlocking: vec![], claimed_rewards: vec![] }) ); assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); @@ -139,7 +140,7 @@ fn change_controller_works() { assert_ok!(Staking::set_controller(Origin::signed(11), 5)); - start_era(1); + mock::start_era(1); assert_noop!( Staking::validate(Origin::signed(10), ValidatorPrefs::default()), @@ -221,7 +222,7 @@ fn rewards_should_work() { let total_payout_1 = current_total_payout_for_duration(3 * 1000); assert!(total_payout_1 > 10); // Test is meaningful if reward something - start_era(2); + mock::start_era(2); mock::make_all_reward_payment(1); assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); @@ -253,8 +254,6 @@ fn staking_should_work() { // --- Block 1: start_session(1); - Timestamp::set_timestamp(1); // Initialize time. - // remember + compare this along with the test. assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -265,7 +264,6 @@ fn staking_should_work() { start_session(2); // add a new candidate for being a validator. account 3 controlled by 4. 
assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); - let current_era_at_bond = Staking::current_era(); assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); // No effects will be seen so far. @@ -312,7 +310,7 @@ fn staking_should_work() { total: 1500, active: 1500, unlocking: vec![], - last_reward: current_era_at_bond, + claimed_rewards: vec![0], }) ); // e.g. it cannot spend more than 500 that it has free from the total 2000 @@ -337,7 +335,7 @@ fn less_than_needed_candidates_works() { assert_eq!(Staking::minimum_validator_count(), 1); assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); - start_era(1); + mock::start_era(1); // Previous set is selected. NO election algorithm is even executed. assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); @@ -375,7 +373,7 @@ fn no_candidate_emergency_condition() { let _ = Staking::chill(Origin::signed(10)); // trigger era - start_era(1); + mock::start_era(1); // Previous ones are elected. chill is invalidates. TODO: #2494 assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); @@ -455,7 +453,7 @@ fn nominating_and_rewards_should_work() { >::reward_by_ids(vec![(41, 1)]); >::reward_by_ids(vec![(31, 1)]); - start_era(1); + mock::start_era(1); // 10 and 20 have more votes, they will be chosen by phragmen. assert_eq_uvec!(validator_controllers(), vec![20, 10]); @@ -498,7 +496,7 @@ fn nominating_and_rewards_should_work() { >::reward_by_ids(vec![(21, 2)]); >::reward_by_ids(vec![(11, 1)]); - start_era(2); + mock::start_era(2); // nothing else will happen, era ends and rewards are paid again, // it is expected that nominators will also be paid. See below @@ -566,7 +564,7 @@ fn nominators_also_get_slashed() { >::reward_by_ids(vec![(11, 1)]); // new era, pay rewards, - start_era(1); + mock::start_era(1); // Nominator stash didn't collect any. assert_eq!(Balances::total_balance(&2), initial_balance); @@ -648,51 +646,37 @@ fn double_controlling_should_fail() { fn session_and_eras_work() { ExtBuilder::default().build().execute_with(|| { assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(Session::current_index(), 0); - // Block 1: No change. + // Session 1: No change. start_session(1); assert_eq!(Session::current_index(), 1); assert_eq!(Staking::active_era().unwrap().index, 0); - // Block 2: No change. + // Session 2: No change. start_session(2); assert_eq!(Session::current_index(), 2); assert_eq!(Staking::active_era().unwrap().index, 0); - // Block 3: Era increment. + // Session 3: Era increment. start_session(3); assert_eq!(Session::current_index(), 3); assert_eq!(Staking::active_era().unwrap().index, 1); - // Block 4: No change. + // Session 4: No change. start_session(4); assert_eq!(Session::current_index(), 4); assert_eq!(Staking::active_era().unwrap().index, 1); - // Block 5: No change. + // Session 5: No change. start_session(5); assert_eq!(Session::current_index(), 5); assert_eq!(Staking::active_era().unwrap().index, 1); - // Block 6: Era increment. + // Session 6: Era increment. start_session(6); assert_eq!(Session::current_index(), 6); assert_eq!(Staking::active_era().unwrap().index, 2); - - // Block 7: No change. - start_session(7); - assert_eq!(Session::current_index(), 7); - assert_eq!(Staking::active_era().unwrap().index, 2); - - // Block 8: No change. - start_session(8); - assert_eq!(Session::current_index(), 8); - assert_eq!(Staking::active_era().unwrap().index, 2); - - // Block 9: Era increment. 
- start_session(9); - assert_eq!(Session::current_index(), 9); - assert_eq!(Staking::active_era().unwrap().index, 3); }); } @@ -834,7 +818,7 @@ fn reward_destination_works() { total: 1000, active: 1000, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], })); // Compute total payout now for whole duration as other parameter won't change @@ -842,7 +826,7 @@ fn reward_destination_works() { assert!(total_payout_0 > 100); // Test is meaningful if reward something >::reward_by_ids(vec![(11, 1)]); - start_era(1); + mock::start_era(1); mock::make_all_reward_payment(0); // Check that RewardDestination is Staked (default) @@ -855,7 +839,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: vec![], - last_reward: Some(0), + claimed_rewards: vec![0], })); //Change RewardDestination to Stash @@ -866,7 +850,7 @@ fn reward_destination_works() { assert!(total_payout_1 > 100); // Test is meaningful if reward something >::reward_by_ids(vec![(11, 1)]); - start_era(2); + mock::start_era(2); mock::make_all_reward_payment(1); // Check that RewardDestination is Stash @@ -881,7 +865,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: vec![], - last_reward: Some(1), + claimed_rewards: vec![0,1], })); // Change RewardDestination to Controller @@ -895,7 +879,7 @@ fn reward_destination_works() { assert!(total_payout_2 > 100); // Test is meaningful if reward something >::reward_by_ids(vec![(11, 1)]); - start_era(3); + mock::start_era(3); mock::make_all_reward_payment(2); // Check that RewardDestination is Controller @@ -908,7 +892,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: vec![], - last_reward: Some(2), + claimed_rewards: vec![0,1,2], })); // Check that amount in staked account is NOT increased. assert_eq!(Balances::free_balance(11), recorded_stash_balance); @@ -930,7 +914,7 @@ fn validator_payment_prefs_work() { >::insert(&11, RewardDestination::Controller); >::insert(&101, RewardDestination::Controller); - start_era(1); + mock::start_era(1); mock::make_all_reward_payment(0); let balance_era_1_10 = Balances::total_balance(&10); @@ -942,7 +926,7 @@ fn validator_payment_prefs_work() { let exposure_1 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); >::reward_by_ids(vec![(11, 1)]); - start_era(2); + mock::start_era(2); mock::make_all_reward_payment(1); let taken_cut = commission * total_payout_1; @@ -974,7 +958,7 @@ fn bond_extra_works() { total: 1000, active: 1000, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], })); // Give account 11 some large free balance greater than total @@ -988,7 +972,7 @@ fn bond_extra_works() { total: 1000 + 100, active: 1000 + 100, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], })); // Call the bond_extra function with a large number, should handle it @@ -999,7 +983,7 @@ fn bond_extra_works() { total: 1000000, active: 1000000, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], })); }); } @@ -1026,7 +1010,7 @@ fn bond_extra_and_withdraw_unbonded_works() { assert_eq!(Balances::total_balance(&10), 1); // confirm that 10 is a normal validator and gets paid at the end of the era. 
- start_era(1); + mock::start_era(1); // Initial state of 10 assert_eq!(Staking::ledger(&10), Some(StakingLedger { @@ -1034,7 +1018,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000, active: 1000, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], })); assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11), Exposure { total: 1000, own: 1000, others: vec![] }); @@ -1046,14 +1030,13 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], })); // Exposure is a snapshot! only updated after the next era update. assert_ne!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); // trigger next era. - Timestamp::set_timestamp(10); - start_era(2); + mock::start_era(2); assert_eq!(Staking::active_era().unwrap().index, 2); // ledger should be the same. @@ -1062,7 +1045,7 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], })); // Exposure is now updated. assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); @@ -1070,29 +1053,29 @@ fn bond_extra_and_withdraw_unbonded_works() { // Unbond almost all of the funds in stash. Staking::unbond(Origin::signed(10), 1000).unwrap(); assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], last_reward: None }) + stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], claimed_rewards: vec![] }) ); // Attempting to free the balances now will fail. 2 eras need to pass. Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], last_reward: None })); + stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], claimed_rewards: vec![] })); // trigger next era. - start_era(3); + mock::start_era(3); // nothing yet Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], last_reward: None })); + stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], claimed_rewards: vec![] })); // trigger next era. - start_era(5); + mock::start_era(5); Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); // Now the value is free and the staking ledger is updated. assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 100, active: 100, unlocking: vec![], last_reward: None })); + stash: 11, total: 100, active: 100, unlocking: vec![], claimed_rewards: vec![] })); }) } @@ -1104,14 +1087,14 @@ fn too_many_unbond_calls_should_not_work() { assert_ok!(Staking::unbond(Origin::signed(10), 1)); } - start_era(1); + mock::start_era(1); // locked at era 1 until 4 assert_ok!(Staking::unbond(Origin::signed(10), 1)); // can't do more. assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); - start_era(3); + mock::start_era(3); assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); // free up. 
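// Illustrative sketch: the hunks above replace the ledger field `last_reward: Option<EraIndex>`
// with `claimed_rewards: Vec<EraIndex>`. The standalone model below shows why a vector is used:
// payouts can be claimed per era, out of order, with duplicates rejected (mirrored later by
// `Error::AlreadyClaimed`). `SimplifiedLedger` and its helper are assumptions made for the
// example, not the pallet's real `StakingLedger`.
#[derive(Debug)]
struct SimplifiedLedger {
	claimed_rewards: Vec<u32>,
}

impl SimplifiedLedger {
	/// Record a payout for `era`, keeping the vector sorted and rejecting duplicates.
	fn claim(&mut self, era: u32) -> Result<(), &'static str> {
		match self.claimed_rewards.binary_search(&era) {
			Ok(_) => Err("already claimed"),
			Err(pos) => {
				self.claimed_rewards.insert(pos, era);
				Ok(())
			}
		}
	}
}

#[test]
fn simplified_ledger_accepts_out_of_order_claims_once() {
	let mut ledger = SimplifiedLedger { claimed_rewards: vec![] };
	assert!(ledger.claim(2).is_ok());
	assert!(ledger.claim(0).is_ok());
	assert!(ledger.claim(1).is_ok());
	// A second claim for the same era is rejected.
	assert!(ledger.claim(1).is_err());
	assert_eq!(ledger.claimed_rewards, vec![0, 1, 2]);
}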
@@ -1143,7 +1126,7 @@ fn rebond_works() { let _ = Balances::make_free_balance_be(&11, 1000000); // confirm that 10 is a normal validator and gets paid at the end of the era. - start_era(1); + mock::start_era(1); // Initial state of 10 assert_eq!( @@ -1153,11 +1136,11 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], }) ); - start_era(2); + mock::start_era(2); assert_eq!(Staking::active_era().unwrap().index, 2); // Try to rebond some funds. We get an error since no fund is unbonded. @@ -1178,7 +1161,7 @@ fn rebond_works() { value: 900, era: 2 + 3, }], - last_reward: None, + claimed_rewards: vec![], }) ); @@ -1191,7 +1174,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], }) ); @@ -1204,7 +1187,7 @@ fn rebond_works() { total: 1000, active: 100, unlocking: vec![UnlockChunk { value: 900, era: 5 }], - last_reward: None, + claimed_rewards: vec![], }) ); @@ -1217,7 +1200,7 @@ fn rebond_works() { total: 1000, active: 600, unlocking: vec![UnlockChunk { value: 400, era: 5 }], - last_reward: None, + claimed_rewards: vec![], }) ); @@ -1230,7 +1213,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], }) ); @@ -1249,7 +1232,7 @@ fn rebond_works() { UnlockChunk { value: 300, era: 5 }, UnlockChunk { value: 300, era: 5 }, ], - last_reward: None, + claimed_rewards: vec![], }) ); @@ -1265,7 +1248,7 @@ fn rebond_works() { UnlockChunk { value: 300, era: 5 }, UnlockChunk { value: 100, era: 5 }, ], - last_reward: None, + claimed_rewards: vec![], }) ); }) @@ -1288,7 +1271,7 @@ fn rebond_is_fifo() { let _ = Balances::make_free_balance_be(&11, 1000000); // confirm that 10 is a normal validator and gets paid at the end of the era. - start_era(1); + mock::start_era(1); // Initial state of 10 assert_eq!( @@ -1298,11 +1281,11 @@ fn rebond_is_fifo() { total: 1000, active: 1000, unlocking: vec![], - last_reward: None, + claimed_rewards: vec![], }) ); - start_era(2); + mock::start_era(2); // Unbond some of the funds in stash. Staking::unbond(Origin::signed(10), 400).unwrap(); @@ -1315,11 +1298,11 @@ fn rebond_is_fifo() { unlocking: vec![ UnlockChunk { value: 400, era: 2 + 3 }, ], - last_reward: None, + claimed_rewards: vec![], }) ); - start_era(3); + mock::start_era(3); // Unbond more of the funds in stash. Staking::unbond(Origin::signed(10), 300).unwrap(); @@ -1333,11 +1316,11 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 300, era: 3 + 3 }, ], - last_reward: None, + claimed_rewards: vec![], }) ); - start_era(4); + mock::start_era(4); // Unbond yet more of the funds in stash. 
Staking::unbond(Origin::signed(10), 200).unwrap(); @@ -1352,7 +1335,7 @@ fn rebond_is_fifo() { UnlockChunk { value: 300, era: 3 + 3 }, UnlockChunk { value: 200, era: 4 + 3 }, ], - last_reward: None, + claimed_rewards: vec![], }) ); @@ -1368,7 +1351,7 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 100, era: 3 + 3 }, ], - last_reward: None, + claimed_rewards: vec![], }) ); }) @@ -1394,7 +1377,7 @@ fn reward_to_stake_works() { // Now lets lower account 20 stake assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69); - >::insert(&20, StakingLedger { stash: 21, total: 69, active: 69, unlocking: vec![], last_reward: None }); + >::insert(&20, StakingLedger { stash: 21, total: 69, active: 69, unlocking: vec![], claimed_rewards: vec![] }); // Compute total payout now for whole duration as other parameter won't change let total_payout_0 = current_total_payout_for_duration(3000); @@ -1403,7 +1386,7 @@ fn reward_to_stake_works() { >::reward_by_ids(vec![(21, 1)]); // New era --> rewards are paid --> stakes are changed - start_era(1); + mock::start_era(1); mock::make_all_reward_payment(0); assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); @@ -1413,7 +1396,7 @@ fn reward_to_stake_works() { assert_eq!(_11_balance, 1000 + total_payout_0 / 2); // Trigger another new era as the info are frozen before the era start. - start_era(2); + mock::start_era(2); // -- new infos assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000 + total_payout_0 / 2); @@ -1557,7 +1540,7 @@ fn switching_roles() { assert_ok!(Staking::bond(Origin::signed(5), 6, 1000, RewardDestination::Controller)); assert_ok!(Staking::validate(Origin::signed(6), ValidatorPrefs::default())); - start_era(1); + mock::start_era(1); // with current nominators 10 and 5 have the most stake assert_eq_uvec!(validator_controllers(), vec![6, 10]); @@ -1571,7 +1554,7 @@ fn switching_roles() { // 2 : 2000 self vote + 250 vote. // Winners: 20 and 2 - start_era(2); + mock::start_era(2); assert_eq_uvec!(validator_controllers(), vec![2, 20]); @@ -1596,7 +1579,7 @@ fn wrong_vote_is_null() { ])); // new block - start_era(1); + mock::start_era(1); assert_eq_uvec!(validator_controllers(), vec![20, 10]); }); @@ -1620,7 +1603,6 @@ fn bond_with_no_staked_value() { ); // bonded with absolute minimum value possible. assert_ok!(Staking::bond(Origin::signed(1), 2, 5, RewardDestination::Controller)); - let current_era_at_bond = Staking::current_era(); assert_eq!(Balances::locks(&1)[0].amount, 5); // unbonding even 1 will cause all to be unbonded. @@ -1632,19 +1614,19 @@ fn bond_with_no_staked_value() { active: 0, total: 5, unlocking: vec![UnlockChunk {value: 5, era: 3}], - last_reward: current_era_at_bond, + claimed_rewards: vec![], }) ); - start_era(1); - start_era(2); + mock::start_era(1); + mock::start_era(2); // not yet removed. assert_ok!(Staking::withdraw_unbonded(Origin::signed(2))); assert!(Staking::ledger(2).is_some()); assert_eq!(Balances::locks(&1)[0].amount, 5); - start_era(3); + mock::start_era(3); // poof. Account 1 is removed from the staking system. assert_ok!(Staking::withdraw_unbonded(Origin::signed(2))); @@ -1677,7 +1659,7 @@ fn bond_with_little_staked_value_bounded() { let total_payout_0 = current_total_payout_for_duration(3000); assert!(total_payout_0 > 100); // Test is meaningful if reward something reward_all_elected(); - start_era(1); + mock::start_era(1); mock::make_all_reward_payment(0); // 2 is elected. 
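// Illustrative sketch: `rebond_is_fifo` above checks that re-bonding consumes the most recently
// scheduled unlock chunks first, leaving the oldest chunk untouched. The standalone model below
// reproduces that behaviour over a plain vector; `UnlockChunkSketch` and `rebond_sketch` are local
// stand-ins assumed for the example, not the pallet's types.
#[derive(Debug, PartialEq)]
struct UnlockChunkSketch {
	value: u64,
	era: u32,
}

/// Move up to `amount` from the unlocking queue back into the active balance, starting from the
/// newest chunk (the back of the vector). Returns the amount actually re-bonded.
fn rebond_sketch(unlocking: &mut Vec<UnlockChunkSketch>, mut amount: u64) -> u64 {
	let mut rebonded = 0;
	while amount > 0 && !unlocking.is_empty() {
		let last = unlocking.len() - 1;
		if unlocking[last].value <= amount {
			let chunk = unlocking.remove(last);
			amount -= chunk.value;
			rebonded += chunk.value;
		} else {
			unlocking[last].value -= amount;
			rebonded += amount;
			amount = 0;
		}
	}
	rebonded
}

#[test]
fn rebond_sketch_consumes_newest_chunks_first() {
	// Chunks scheduled for eras 5, 6 and 7, as in `rebond_is_fifo` above.
	let mut unlocking = vec![
		UnlockChunkSketch { value: 400, era: 5 },
		UnlockChunkSketch { value: 300, era: 6 },
		UnlockChunkSketch { value: 200, era: 7 },
	];
	assert_eq!(rebond_sketch(&mut unlocking, 400), 400);
	// The era-7 chunk is gone, the era-6 chunk is partially consumed, and era 5 is untouched.
	assert_eq!(unlocking, vec![
		UnlockChunkSketch { value: 400, era: 5 },
		UnlockChunkSketch { value: 100, era: 6 },
	]);
}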
@@ -1694,7 +1676,7 @@ fn bond_with_little_staked_value_bounded() { let total_payout_1 = current_total_payout_for_duration(3000); assert!(total_payout_1 > 100); // Test is meaningful if reward something reward_all_elected(); - start_era(2); + mock::start_era(2); mock::make_all_reward_payment(1); assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); @@ -1722,7 +1704,6 @@ fn new_era_elects_correct_number_of_validators() { assert_eq!(Staking::validator_count(), 1); assert_eq!(validator_controllers().len(), 1); - System::set_block_number(1); Session::on_initialize(System::block_number()); assert_eq!(validator_controllers().len(), 1); @@ -1737,13 +1718,13 @@ fn phragmen_should_not_overflow_validators() { let _ = Staking::chill(Origin::signed(10)); let _ = Staking::chill(Origin::signed(20)); - bond_validator(2, u64::max_value()); - bond_validator(4, u64::max_value()); + bond_validator(3, 2, u64::max_value()); + bond_validator(5, 4, u64::max_value()); - bond_nominator(6, u64::max_value() / 2, vec![3, 5]); - bond_nominator(8, u64::max_value() / 2, vec![3, 5]); + bond_nominator(7, 6, u64::max_value() / 2, vec![3, 5]); + bond_nominator(9, 8, u64::max_value() / 2, vec![3, 5]); - start_era(1); + mock::start_era(1); assert_eq_uvec!(validator_controllers(), vec![4, 2]); @@ -1760,13 +1741,13 @@ fn phragmen_should_not_overflow_nominators() { let _ = Staking::chill(Origin::signed(10)); let _ = Staking::chill(Origin::signed(20)); - bond_validator(2, u64::max_value() / 2); - bond_validator(4, u64::max_value() / 2); + bond_validator(3, 2, u64::max_value() / 2); + bond_validator(5, 4, u64::max_value() / 2); - bond_nominator(6, u64::max_value(), vec![3, 5]); - bond_nominator(8, u64::max_value(), vec![3, 5]); + bond_nominator(7, 6, u64::max_value(), vec![3, 5]); + bond_nominator(9, 8, u64::max_value(), vec![3, 5]); - start_era(1); + mock::start_era(1); assert_eq_uvec!(validator_controllers(), vec![4, 2]); @@ -1779,13 +1760,13 @@ fn phragmen_should_not_overflow_nominators() { #[test] fn phragmen_should_not_overflow_ultimate() { ExtBuilder::default().nominate(false).build().execute_with(|| { - bond_validator(2, u64::max_value()); - bond_validator(4, u64::max_value()); + bond_validator(3, 2, u64::max_value()); + bond_validator(5, 4, u64::max_value()); - bond_nominator(6, u64::max_value(), vec![3, 5]); - bond_nominator(8, u64::max_value(), vec![3, 5]); + bond_nominator(7, 6, u64::max_value(), vec![3, 5]); + bond_nominator(9, 8, u64::max_value(), vec![3, 5]); - start_era(1); + mock::start_era(1); assert_eq_uvec!(validator_controllers(), vec![4, 2]); @@ -1818,7 +1799,7 @@ fn reward_validator_slashing_validator_doesnt_overflow() { ErasStakers::::insert(0, 11, &exposure); ErasStakersClipped::::insert(0, 11, exposure); ErasValidatorReward::::insert(0, stake); - assert_ok!(Staking::payout_validator(Origin::signed(10), 0)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 0)); assert_eq!(Balances::total_balance(&11), stake * 2); // Set staker @@ -1924,21 +1905,23 @@ fn era_is_always_same_length() { // This ensures that the sessions is always of the same length if there is no forcing no // session changes. 
ExtBuilder::default().build().execute_with(|| { - start_era(1); - assert_eq!(Staking::eras_start_session_index(Staking::active_era().unwrap().index).unwrap(), SessionsPerEra::get()); + let session_per_era = >::get(); + + mock::start_era(1); + assert_eq!(Staking::eras_start_session_index(active_era()).unwrap(), session_per_era); - start_era(2); - assert_eq!(Staking::eras_start_session_index(Staking::active_era().unwrap().index).unwrap(), SessionsPerEra::get() * 2); + mock::start_era(2); + assert_eq!(Staking::eras_start_session_index(active_era()).unwrap(), session_per_era * 2u32); let session = Session::current_index(); ForceEra::put(Forcing::ForceNew); advance_session(); advance_session(); assert_eq!(Staking::active_era().unwrap().index, 3); - assert_eq!(Staking::eras_start_session_index(Staking::active_era().unwrap().index).unwrap(), session + 2); + assert_eq!(Staking::eras_start_session_index(active_era()).unwrap(), session + 2); - start_era(4); - assert_eq!(Staking::eras_start_session_index(Staking::active_era().unwrap().index).unwrap(), session + 2 + SessionsPerEra::get()); + mock::start_era(4); + assert_eq!(Staking::eras_start_session_index(active_era()).unwrap(), session + 2u32 + session_per_era); }); } @@ -1964,6 +1947,7 @@ fn offence_forces_new_era() { fn offence_ensures_new_era_without_clobbering() { ExtBuilder::default().build().execute_with(|| { assert_ok!(Staking::force_new_era_always(Origin::ROOT)); + assert_eq!(Staking::force_era(), Forcing::ForceAlways); on_offence_now( &[OffenceDetails { @@ -1981,10 +1965,11 @@ fn offence_ensures_new_era_without_clobbering() { } #[test] -fn offence_deselects_validator_when_slash_is_zero() { +fn offence_deselects_validator_even_when_slash_is_zero() { ExtBuilder::default().build().execute_with(|| { assert!(Session::validators().contains(&11)); assert!(>::contains_key(11)); + on_offence_now( &[OffenceDetails { offender: ( @@ -1995,9 +1980,12 @@ fn offence_deselects_validator_when_slash_is_zero() { }], &[Perbill::from_percent(0)], ); + assert_eq!(Staking::force_era(), Forcing::ForceNew); assert!(!>::contains_key(11)); - start_era(1); + + mock::start_era(1); + assert!(!Session::validators().contains(&11)); assert!(!>::contains_key(11)); }); @@ -2034,10 +2022,11 @@ fn slashing_performed_according_exposure() { #[test] fn slash_in_old_span_does_not_deselect() { ExtBuilder::default().build().execute_with(|| { - start_era(1); + mock::start_era(1); assert!(>::contains_key(11)); assert!(Session::validators().contains(&11)); + on_offence_now( &[OffenceDetails { offender: ( @@ -2048,17 +2037,18 @@ fn slash_in_old_span_does_not_deselect() { }], &[Perbill::from_percent(0)], ); + assert_eq!(Staking::force_era(), Forcing::ForceNew); assert!(!>::contains_key(11)); - start_era(2); + mock::start_era(2); Staking::validate(Origin::signed(10), Default::default()).unwrap(); assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); assert!(!Session::validators().contains(&11)); - start_era(3); + mock::start_era(3); // this staker is in a new slashing span now, having re-registered after // their prior slash. @@ -2075,7 +2065,7 @@ fn slash_in_old_span_does_not_deselect() { 1, ); - // not for zero-slash. + // not forcing for zero-slash and previous span. assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); assert!(Session::validators().contains(&11)); @@ -2096,7 +2086,7 @@ fn slash_in_old_span_does_not_deselect() { // or non-zero. 
assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); - assert!(Session::validators().contains(&11)); + assert!(Session::validators().contains(&11)); assert_ledger_consistent(11); }); } @@ -2135,7 +2125,7 @@ fn reporters_receive_their_slice() { #[test] fn subsequent_reports_in_same_span_pay_out_less() { // This test verifies that the reporters of the offence receive their slice from the slashed - // amount. + // amount, but less and less if they submit multiple reports in one span. ExtBuilder::default().build().execute_with(|| { // The reporters' reward is calculated from the total exposure. let initial_balance = 1125; @@ -2242,12 +2232,16 @@ fn dont_slash_if_fraction_is_zero() { // The validator hasn't been slashed. The new era is not forced. assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert_ledger_consistent(11); }); } #[test] fn only_slash_for_max_in_era() { + // multiple slashes within one era are only applied if it is more than any previous slash in the + // same era. ExtBuilder::default().build().execute_with(|| { assert_eq!(Balances::free_balance(11), 1000); @@ -2296,6 +2290,7 @@ fn only_slash_for_max_in_era() { #[test] fn garbage_collection_after_slashing() { + // ensures that `SlashingSpans` and `SpanSlash` of an account is removed after reaping. ExtBuilder::default().existential_deposit(2).build().execute_with(|| { assert_eq!(Balances::free_balance(11), 256_000); @@ -2338,27 +2333,28 @@ fn garbage_collection_after_slashing() { #[test] fn garbage_collection_on_window_pruning() { + // ensures that `ValidatorSlashInEra` and `NominatorSlashInEra` are cleared after + // `BondingDuration`. ExtBuilder::default().build().execute_with(|| { - start_era(1); + mock::start_era(1); assert_eq!(Balances::free_balance(11), 1000); + let now = Staking::active_era().unwrap().index; - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + let exposure = Staking::eras_stakers(now, 11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( &[ OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), + offender: (11, Staking::eras_stakers(now, 11)), reporters: vec![], }, ], &[Perbill::from_percent(10)], ); - let now = Staking::active_era().unwrap().index; - assert_eq!(Balances::free_balance(11), 900); assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); @@ -2370,7 +2366,7 @@ fn garbage_collection_on_window_pruning() { assert!(::ValidatorSlashInEra::get(&now, &11).is_some()); assert!(::NominatorSlashInEra::get(&now, &101).is_some()); - start_era(era); + mock::start_era(era); } assert!(::ValidatorSlashInEra::get(&now, &11).is_none()); @@ -2381,19 +2377,17 @@ fn garbage_collection_on_window_pruning() { #[test] fn slashing_nominators_by_span_max() { ExtBuilder::default().build().execute_with(|| { - start_era(1); - start_era(2); - start_era(3); + mock::start_era(1); + mock::start_era(2); + mock::start_era(3); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(21), 2000); assert_eq!(Balances::free_balance(101), 2000); assert_eq!(Staking::slashable_balance_of(&21), 1000); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, 21); - assert_eq!(Balances::free_balance(101), 2000); let nominated_value_11 = 
exposure_11.others.iter().find(|o| o.who == 101).unwrap().value; let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value; @@ -2481,9 +2475,9 @@ fn slashing_nominators_by_span_max() { #[test] fn slashes_are_summed_across_spans() { ExtBuilder::default().build().execute_with(|| { - start_era(1); - start_era(2); - start_era(3); + mock::start_era(1); + mock::start_era(2); + mock::start_era(3); assert_eq!(Balances::free_balance(21), 2000); assert_eq!(Staking::slashable_balance_of(&21), 1000); @@ -2511,7 +2505,7 @@ fn slashes_are_summed_across_spans() { // 21 has been force-chilled. re-signal intent to validate. Staking::validate(Origin::signed(20), Default::default()).unwrap(); - start_era(4); + mock::start_era(4); assert_eq!(Staking::slashable_balance_of(&21), 900); @@ -2539,7 +2533,7 @@ fn slashes_are_summed_across_spans() { #[test] fn deferred_slashes_are_deferred() { ExtBuilder::default().slash_defer_duration(2).build().execute_with(|| { - start_era(1); + mock::start_era(1); assert_eq!(Balances::free_balance(11), 1000); @@ -2560,19 +2554,19 @@ fn deferred_slashes_are_deferred() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - start_era(2); + mock::start_era(2); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - start_era(3); + mock::start_era(3); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. - start_era(4); + mock::start_era(4); assert_eq!(Balances::free_balance(11), 900); assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); @@ -2582,7 +2576,7 @@ fn deferred_slashes_are_deferred() { #[test] fn remove_deferred() { ExtBuilder::default().slash_defer_duration(2).build().execute_with(|| { - start_era(1); + mock::start_era(1); assert_eq!(Balances::free_balance(11), 1000); @@ -2603,7 +2597,7 @@ fn remove_deferred() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - start_era(2); + mock::start_era(2); on_offence_in_era( &[ @@ -2627,20 +2621,20 @@ fn remove_deferred() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - start_era(3); + mock::start_era(3); assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); // at the start of era 4, slashes from era 1 are processed, // after being deferred for at least 2 full eras. - start_era(4); + mock::start_era(4); // the first slash for 10% was cancelled, so no effect. 
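// Illustrative sketch: with `slash_defer_duration(2)`, `deferred_slashes_are_deferred` above shows
// a slash reported in era 1 taking effect only at the start of era 4, and `remove_deferred` /
// `remove_multi_deferred` show that a queued slash can still be cancelled before then. The
// standalone model below captures that bookkeeping; the type and function names are assumptions
// made for the example.
#[derive(Debug)]
struct DeferredSlashSketch {
	validator: u64,
	percent: u32,
}

/// Era at which a slash reported in `slash_era` becomes effective, as observed in the test above.
fn slash_apply_era(slash_era: u32, slash_defer_duration: u32) -> u32 {
	slash_era + slash_defer_duration + 1
}

/// Cancel queued slashes by index before they are applied.
fn cancel_deferred(queue: &mut Vec<DeferredSlashSketch>, mut indices: Vec<usize>) {
	// Remove from the back so the earlier indices stay valid while removing.
	indices.sort_unstable();
	for index in indices.into_iter().rev() {
		if index < queue.len() {
			queue.remove(index);
		}
	}
}

#[test]
fn deferred_slash_queue_sketch() {
	// Reported in era 1, deferred for two full eras, applied at the start of era 4.
	assert_eq!(slash_apply_era(1, 2), 4);

	let mut queue = vec![
		DeferredSlashSketch { validator: 11, percent: 10 },
		DeferredSlashSketch { validator: 21, percent: 10 },
		DeferredSlashSketch { validator: 42, percent: 25 },
	];
	// Cancelling the first entry leaves the other two to be applied.
	cancel_deferred(&mut queue, vec![0]);
	assert_eq!(queue.len(), 2);
	assert_eq!(queue[0].validator, 21);
	assert_eq!(queue[1].validator, 42);
}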
assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(101), 2000); - start_era(5); + mock::start_era(5); let slash_10 = Perbill::from_percent(10); let slash_15 = Perbill::from_percent(15); @@ -2658,7 +2652,7 @@ fn remove_deferred() { #[test] fn remove_multi_deferred() { ExtBuilder::default().slash_defer_duration(2).build().execute_with(|| { - start_era(1); + mock::start_era(1); assert_eq!(Balances::free_balance(11), 1000); @@ -2737,39 +2731,1123 @@ fn remove_multi_deferred() { let slashes = ::UnappliedSlashes::get(&1); assert_eq!(slashes.len(), 2); - println!("Slashes: {:?}", slashes); assert_eq!(slashes[0].validator, 21); assert_eq!(slashes[1].validator, 42); }) } +mod offchain_phragmen { + use crate::*; + use frame_support::{assert_noop, assert_ok}; + use sp_runtime::transaction_validity::TransactionSource; + use mock::*; + use parking_lot::RwLock; + use sp_core::offchain::{ + testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, + OffchainExt, TransactionPoolExt, + }; + use sp_io::TestExternalities; + use sp_phragmen::StakedAssignment; + use frame_support::traits::OffchainWorker; + use std::sync::Arc; + use substrate_test_utils::assert_eq_uvec; + + fn percent(x: u16) -> OffchainAccuracy { + OffchainAccuracy::from_percent(x) + } + + /// setup a new set of validators and nominator storage items independent of the parent mock + /// file. This produces a edge graph that can be reduced. + fn build_offchain_phragmen_test_ext() { + for i in (10..=40).step_by(10) { + // Note: we respect the convention of the mock (10, 11 pairs etc.) since these accounts + // have corresponding keys in session which makes everything more ergonomic and + // realistic. + bond_validator(i + 1, i, 100); + } + + let mut voter = 1; + bond_nominator(voter, 1000 + voter, 100, vec![11]); + voter = 2; + bond_nominator(voter, 1000 + voter, 100, vec![11, 11]); + voter = 3; + bond_nominator(voter, 1000 + voter, 100, vec![21, 41]); + voter = 4; + bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); + voter = 5; + bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); + } + + fn offchainify(ext: &mut TestExternalities) -> Arc> { + let (offchain, _state) = TestOffchainExt::new(); + let (pool, state) = TestTransactionPoolExt::new(); + + ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + state + } + + #[test] + fn is_current_session_final_works() { + ExtBuilder::default() + .session_per_era(3) + .build() + .execute_with(|| { + mock::start_era(1); + assert_eq!(Session::current_index(), 3); + assert_eq!(Staking::current_era(), Some(1)); + assert_eq!(Staking::is_current_session_final(), false); + + start_session(4); + assert_eq!(Session::current_index(), 4); + assert_eq!(Staking::current_era(), Some(1)); + assert_eq!(Staking::is_current_session_final(), true); + + start_session(5); + assert_eq!(Session::current_index(), 5); + // era changed. 
+ assert_eq!(Staking::current_era(), Some(2)); + assert_eq!(Staking::is_current_session_final(), false); + }) + } + + #[test] + fn offchain_election_flag_is_triggered() { + ExtBuilder::default() + .session_per_era(5) + .session_length(10) + .election_lookahead(3) + .build() + .execute_with(|| { + run_to_block(7); + assert_session_era!(0, 0); + + run_to_block(10); + assert_session_era!(1, 0); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + assert!(Staking::snapshot_nominators().is_none()); + assert!(Staking::snapshot_validators().is_none()); + + run_to_block(36); + assert_session_era!(3, 0); + + // fist era has session 0, which has 0 blocks length, so we have in total 40 blocks + // in the era. + run_to_block(37); + assert_session_era!(3, 0); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); + assert!(Staking::snapshot_nominators().is_some()); + assert!(Staking::snapshot_validators().is_some()); + + run_to_block(38); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); + + run_to_block(39); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); + + run_to_block(40); + assert_session_era!(4, 0); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + assert!(Staking::snapshot_nominators().is_none()); + assert!(Staking::snapshot_validators().is_none()); + + run_to_block(86); + assert_session_era!(8, 1); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + assert!(Staking::snapshot_nominators().is_none()); + assert!(Staking::snapshot_validators().is_none()); + + // second era onwards has 50 blocks per era. + run_to_block(87); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(87)); + assert!(Staking::snapshot_nominators().is_some()); + assert!(Staking::snapshot_validators().is_some()); + + run_to_block(90); + assert_session_era!(9, 1); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + assert!(Staking::snapshot_nominators().is_none()); + assert!(Staking::snapshot_validators().is_none()); + }) + } + + #[test] + fn offchain_election_flag_is_triggered_when_forcing() { + ExtBuilder::default() + .session_per_era(5) + .session_length(10) + .election_lookahead(3) + .build() + .execute_with(|| { + run_to_block(7); + assert_session_era!(0, 0); + + run_to_block(12); + ForceEra::put(Forcing::ForceNew); + run_to_block(13); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + + run_to_block(17); // instead of 47 + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(17)); + }) + } + + #[test] + fn election_on_chain_fallback_works() { + ExtBuilder::default().build().execute_with(|| { + start_session(1); + start_session(2); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + // some election must have happened by now. 
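// Illustrative sketch: in `offchain_election_flag_is_triggered` above the election window opens
// `election_lookahead` blocks before the block that rotates the era (37 for the first 40-block
// era, 87 once eras span 50 blocks) and closes again at the rotating block itself. The standalone
// predicate below models only that window check; its parameter names are assumptions for the
// example.
fn election_window_is_open(now: u64, next_era_start_block: u64, election_lookahead: u64) -> bool {
	now >= next_era_start_block.saturating_sub(election_lookahead) && now < next_era_start_block
}

#[test]
fn election_window_sketch_matches_flag_test() {
	// First era ends at block 40 with a lookahead of 3 blocks.
	assert!(!election_window_is_open(36, 40, 3));
	assert!(election_window_is_open(37, 40, 3));
	assert!(election_window_is_open(39, 40, 3));
	assert!(!election_window_is_open(40, 40, 3));
	// Second era ends at block 90: the window opens at 87.
	assert!(election_window_is_open(87, 90, 3));
	assert!(!election_window_is_open(90, 90, 3));
}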
+ assert_eq!( + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let MetaEvent::staking(inner) = e { + Some(inner) + } else { + None + } + }) + .last() + .unwrap(), + RawEvent::StakingElection(ElectionCompute::OnChain), + ); + }) + } + + #[test] + #[ignore] // This takes a few mins + fn offchain_wont_work_if_snapshot_fails() { + ExtBuilder::default() + .offchain_phragmen_ext() + .build() + .execute_with(|| { + run_to_block(12); + assert!(Staking::snapshot_validators().is_some()); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + + // validate more than the limit + let limit: NominatorIndex = ValidatorIndex::max_value() as NominatorIndex + 1; + let ctrl = 1_000_000; + for i in 0..limit { + bond_validator((1000 + i).into(), (1000 + i + ctrl).into(), 100); + } + + // window stays closed since no snapshot was taken. + run_to_block(27); + assert!(Staking::snapshot_validators().is_none()); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + }) + } + + #[test] + fn staking_is_locked_when_election_window_open() { + ExtBuilder::default() + .offchain_phragmen_ext() + .election_lookahead(3) + .build() + .execute_with(|| { + run_to_block(12); + assert!(Staking::snapshot_validators().is_some()); + // given + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + + // chill et. al. are now not allowed. + assert_noop!( + Staking::chill(Origin::signed(10)), + Error::::CallNotAllowed, + ); + }) + } + + #[test] + fn signed_result_can_be_submitted() { + // should check that we have a new validator set normally, + // event says that it comes from offchain. + ExtBuilder::default() + .offchain_phragmen_ext() + .build() + .execute_with(|| { + run_to_block(12); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + assert!(Staking::snapshot_validators().is_some()); + + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + )); + + let queued_result = Staking::queued_elected().unwrap(); + assert_eq!(queued_result.compute, ElectionCompute::Signed); + + run_to_block(15); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + + assert_eq!( + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let MetaEvent::staking(inner) = e { + Some(inner) + } else { + None + } + }) + .last() + .unwrap(), + RawEvent::StakingElection(ElectionCompute::Signed), + ); + }) + } + + #[test] + fn signed_result_can_be_submitted_later() { + // same as `signed_result_can_be_submitted` but at a later block. 
+ ExtBuilder::default() + .offchain_phragmen_ext() + .build() + .execute_with(|| { + run_to_block(14); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + )); + + let queued_result = Staking::queued_elected().unwrap(); + assert_eq!(queued_result.compute, ElectionCompute::Signed); + + run_to_block(15); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + + assert_eq!( + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let MetaEvent::staking(inner) = e { + Some(inner) + } else { + None + } + }) + .last() + .unwrap(), + RawEvent::StakingElection(ElectionCompute::Signed), + ); + }) + } + + #[test] + fn early_solution_submission_is_rejected() { + // should check that we have a new validator set normally, + // event says that it comes from offchain. + ExtBuilder::default() + .offchain_phragmen_ext() + .build() + .execute_with(|| { + run_to_block(11); + // submission is not yet allowed + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + + // create all the indices just to build the solution. + Staking::create_stakers_snapshot(); + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + Staking::kill_stakers_snapshot(); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenEarlySubmission, + ); + }) + } + + #[test] + fn weak_solution_is_rejected() { + // A solution which is weaker than what we currently have on-chain is rejected. + ExtBuilder::default() + .offchain_phragmen_ext() + .has_stakers(false) + .validator_count(4) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + // a good solution + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + )); + + // a bad solution + let (compact, winners, score) = horrible_phragmen_with_post_processing(false); + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenWeakSubmission, + ); + }) + } + + #[test] + fn better_solution_is_accepted() { + // A solution which is better than what we currently have on-chain is accepted. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + // a meeeeh solution + let (compact, winners, score) = horrible_phragmen_with_post_processing(false); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + )); + + // a better solution + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + )); + }) + } + + #[test] + fn offchain_worker_runs_when_window_open() { + // at the end of the first finalized block with ElectionStatus::open(_), it should execute. 
+ let mut ext = ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(2) + .build(); + let state = offchainify(&mut ext); + ext.execute_with(|| { + run_to_block(12); + + // local key 11 is in the elected set. + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!(state.read().transactions.len(), 0); + Staking::offchain_worker(12); + assert_eq!(state.read().transactions.len(), 1); + + let encoded = state.read().transactions[0].clone(); + let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); + + let call = extrinsic.call; + let inner = match call { + mock::Call::Staking(inner) => inner, + }; + + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &inner, + ), + TransactionValidity::Ok(ValidTransaction { + priority: (1 << 20) + 1125, // the proposed slot stake. + requires: vec![], + provides: vec![("StakingOffchain", active_era()).encode()], + longevity: 3, + propagate: false, + }) + ) + }) + } + + #[test] + fn mediocre_submission_from_authority_is_early_rejected() { + let mut ext = ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .build(); + let state = offchainify(&mut ext); + ext.execute_with(|| { + run_to_block(12); + // put a good solution on-chain + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ),); + + // now run the offchain worker in the same chain state. + Staking::offchain_worker(12); + assert_eq!(state.read().transactions.len(), 1); + + let encoded = state.read().transactions[0].clone(); + let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); + + let call = extrinsic.call; + let inner = match call { + mock::Call::Staking(inner) => inner, + }; + + // pass this call to ValidateUnsigned + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &inner, + ), + TransactionValidity::Err( + InvalidTransaction::Custom(>::PhragmenWeakSubmission.as_u8()).into(), + ), + ) + }) + } + + #[test] + fn invalid_phragmen_result_correct_number_of_winners() { + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + ValidatorCount::put(3); + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + ValidatorCount::put(4); + + assert_eq!(winners.len(), 3); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusWinnerCount, + ); + }) + } + + #[test] + fn invalid_phragmen_result_correct_number_of_winners_1() { + // if we have too little validators, then the number of candidates is the bound. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(8) // we simply cannot elect 8 + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + ValidatorCount::put(3); + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + ValidatorCount::put(4); + + assert_eq!(winners.len(), 3); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusWinnerCount, + ); + }) + } + + #[test] + fn invalid_phragmen_result_correct_number_of_winners_2() { + // if we have too little validators, then the number of candidates is the bound. 
+ ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(8) // we simply cannot elect 8 + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + + assert_eq!(winners.len(), 4); + + // all good. We chose 4 and it works. + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ),); + }) + } + + #[test] + fn invalid_phragmen_result_out_of_bound_nominator_index() { + // A nominator index which is simply invalid + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); + assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (mut compact, winners, score) = prepare_submission_with(true, |_| {}); + + // index 9 doesn't exist. + compact.votes1.push((9, 2)); + + // The error type sadly cannot be more specific now. + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusCompact, + ); + }) + } + + #[test] + fn invalid_phragmen_result_out_of_bound_validator_index() { + // A validator index which is out of bound + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); + assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (mut compact, winners, score) = prepare_submission_with(true, |_| {}); + + // index 4 doesn't exist. + compact.votes1.push((3, 4)); + + // The error type sadly cannot be more specific now. + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusCompact, + ); + }) + } + + #[test] + fn invalid_phragmen_result_out_of_bound_winner_index() { + // A winner index which is simply invalid + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); + assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (compact, _, score) = prepare_submission_with(true, |_| {}); + + // index 4 doesn't exist. + let winners = vec![0, 1, 2, 4]; + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusWinner, + ); + }) + } + + #[test] + fn invalid_phragmen_result_non_winner_validator_index() { + // An edge that points to a correct validator index who is NOT a winner. This is very + // similar to the test that raises `PhragmenBogusNomination`. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(2) // we select only 2. 
+ .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); + assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (compact, winners, score) = prepare_submission_with(true, |a| { + a.iter_mut() + .find(|x| x.who == 5) + // all 3 cannot be among the winners. Although, all of them are validator + // candidates. + .map(|x| x.distribution = vec![(21, 50), (41, 30), (31, 20)]); + }); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusEdge, + ); + }) + } + + #[test] + fn invalid_phragmen_result_wrong_self_vote() { + // A self vote for someone else. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, score) = prepare_submission_with(true, |a| { + // mutate a self vote to target someone else. That someone else is still among the + // winners + a.iter_mut().find(|x| x.who == 11).map(|x| { + x.distribution + .iter_mut() + .find(|y| y.0 == 11) + .map(|y| y.0 = 21) + }); + }); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusSelfVote, + ); + }) + } + + #[test] + fn invalid_phragmen_result_wrong_self_vote_2() { + // A self validator voting for someone else next to self vote. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, score) = prepare_submission_with(true, |a| { + // Remove the self vote. + a.retain(|x| x.who != 11); + // add is as a new double vote + a.push(StakedAssignment { + who: 11, + distribution: vec![(11, 50), (21, 50)], + }); + }); + + // This raises score issue. + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusSelfVote, + ); + }) + } + + #[test] + fn invalid_phragmen_result_over_stake() { + // Someone's edge ratios sums to more than 100%. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + // Note: we don't reduce here to be able to tweak votes3. votes3 will vanish if you + // reduce. + let (mut compact, winners, score) = prepare_submission_with(false, |_| {}); + + if let Some(c) = compact.votes3.iter_mut().find(|x| x.0 == 0) { + // by default it should have been (0, [(2, 33%), (1, 33%)], 0) + // now the sum is above 100% + c.1 = [(2, percent(66)), (1, percent(66))]; + } + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusCompact, + ); + }) + } + + #[test] + fn invalid_phragmen_result_under_stake() { + // at the time of this writing, we cannot under stake someone. The compact assignment works + // in a way that some of the stakes are presented by the submitter, and the last one is read + // from chain by subtracting the rest from total. Hence, the sum is always correct. + // This test is only here as a demonstration. 
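// Illustrative sketch: as the comment in `invalid_phragmen_result_under_stake` just above notes,
// only all-but-one of a voter's stake weights are carried in the compact submission and the last
// one is reconstructed on-chain as "total minus the rest", so the weights always sum to the full
// stake. The standalone helper below models that reconstruction; the names and the plain integer
// weights are assumptions made for the example.
fn reconstruct_last_weight(total_stake: u128, submitted: &[u128]) -> Option<u128> {
	let explicit: u128 = submitted.iter().sum();
	// If the submitted weights already exceed the total, the assignment is bogus.
	total_stake.checked_sub(explicit)
}

#[test]
fn compact_weight_reconstruction_sketch() {
	// A voter with 100 stake submitting two of its three weights explicitly.
	assert_eq!(reconstruct_last_weight(100, &[50, 30]), Some(20));
	// Over-staking is detectable, mirroring `invalid_phragmen_result_over_stake` above.
	assert_eq!(reconstruct_last_weight(100, &[66, 66]), None);
}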
+ } + + #[test] + fn invalid_phragmen_result_invalid_target_stealing() { + // A valid voter who voted for someone who is a candidate, and is a correct winner, but is + // actually NOT nominated by this nominator. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, score) = prepare_submission_with(false, |a| { + // 3 only voted for 20 and 40. We add a fake vote to 30. The stake sum is still + // correctly 100. + a.iter_mut() + .find(|x| x.who == 3) + .map(|x| x.distribution = vec![(21, 50), (41, 30), (31, 20)]); + }); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusNomination, + ); + }) + } + + #[test] + fn nomination_slash_filter_is_checked() { + // If a nominator has voted for someone who has been recently slashed, that particular + // nomination should be disabled for the upcoming election. A solution must respect this + // rule. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + + // finalize the round with fallback. This is needed since all nominator submission + // are in era zero and we want this one to pass with no problems. + run_to_block(15); + + // go to the next session to trigger mock::start_era and bump the active era + run_to_block(20); + + // slash 10. This must happen outside of the election window. + let offender_expo = Staking::eras_stakers(active_era(), 11); + on_offence_now( + &[OffenceDetails { + offender: (11, offender_expo.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(50)], + ); + + // validate 10 again for the next round. But this guy will not have the votes that + // it should have had from 1 and 2. + assert_ok!(Staking::validate( + Origin::signed(10), + Default::default() + )); + + // open the election window and create snapshots. + run_to_block(32); + + // a solution that has been prepared after the slash. + let (compact, winners, score) = prepare_submission_with(false, |a| { + // no one is allowed to vote for 10, except for itself. + a.into_iter() + .filter(|s| s.who != 11) + .for_each(|s| + assert!(s.distribution.iter().find(|(t, _)| *t == 11).is_none()) + ); + }); + + // can be submitted. + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + )); + + // a wrong solution. + let (compact, winners, score) = prepare_submission_with(false, |a| { + // add back the vote that has been filtered out. + a.push(StakedAssignment { + who: 1, + distribution: vec![(11, 100)] + }); + }); + + // is rejected. 
+ assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenSlashedNomination, + ); + }) + } + + #[test] + fn invalid_phragmen_result_wrong_score() { + // A valid voter who's total distributed stake is more than what they bond + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, mut score) = prepare_submission_with(true, |_| {}); + score[0] += 1; + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + active_era(), + ), + Error::::PhragmenBogusScore, + ); + }) + } + + #[test] + fn offchain_storage_is_set() { + let mut ext = ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .build(); + let state = offchainify(&mut ext); + + ext.execute_with(|| { + use offchain_election::OFFCHAIN_HEAD_DB; + use sp_runtime::offchain::storage::StorageValueRef; + + run_to_block(12); + + Staking::offchain_worker(12); + // it works + assert_eq!(state.read().transactions.len(), 1); + + // and it is set + let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); + assert_eq!(storage.get::().unwrap().unwrap(), 12); + }) + } + + #[test] + fn offchain_storage_prevents_duplicate() { + let mut ext = ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .build(); + let _ = offchainify(&mut ext); + + ext.execute_with(|| { + use offchain_election::OFFCHAIN_HEAD_DB; + use sp_runtime::offchain::storage::StorageValueRef; + let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); + + run_to_block(12); + + // first run -- ok + assert_eq!( + offchain_election::set_check_offchain_execution_status::(12), + Ok(()), + ); + assert_eq!(storage.get::().unwrap().unwrap(), 12); + + // re-execute after the next. not allowed. + assert_eq!( + offchain_election::set_check_offchain_execution_status::(13), + Err("recently executed."), + ); + + // a fork like situation -- re-execute 10, 11, 12. But it won't go through. 
+ assert_eq!( + offchain_election::set_check_offchain_execution_status::(10), + Err("fork."), + ); + assert_eq!( + offchain_election::set_check_offchain_execution_status::(11), + Err("fork."), + ); + assert_eq!( + offchain_election::set_check_offchain_execution_status::(12), + Err("recently executed."), + ); + }) + } + + #[test] + #[should_panic] + fn offence_is_blocked_when_window_open() { + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + run_to_block(12); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + + let offender_expo = Staking::eras_stakers(active_era(), 10); + + // panic from the impl in mock + on_offence_now( + &[OffenceDetails { + offender: (10, offender_expo.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + }) + } +} + #[test] -fn slash_kicks_validators_not_nominators() { +fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { ExtBuilder::default().build().execute_with(|| { - start_era(1); + mock::start_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + // pre-slash balance assert_eq!(Balances::free_balance(11), 1000); - - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); assert_eq!(Balances::free_balance(101), 2000); - let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + + // 11 and 21 both have the support of 100 + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); + + assert_eq!(exposure_11.total, 1000 + 125); + assert_eq!(exposure_21.total, 1000 + 375); on_offence_now( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], + &[OffenceDetails { + offender: (11, exposure_11.clone()), + reporters: vec![], + }], &[Perbill::from_percent(10)], ); + // post-slash balance + let nominator_slash_amount_11 = 125 / 10; assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); + assert_eq!( + Balances::free_balance(101), + 2000 - nominator_slash_amount_11 + ); // This is the best way to check that the validator was chilled; `get` will // return default value. - for (stash, _) in ::Validators::enumerate() { + for (stash, _) in ::Validators::iter() { assert!(stash != 11); } @@ -2777,8 +3855,23 @@ fn slash_kicks_validators_not_nominators() { // and make sure that the vote will be ignored even if the validator // re-registers. 
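// Illustrative sketch: `offchain_storage_prevents_duplicate` above exercises a guard that records
// the block number of the last offchain run and then refuses to run again too soon ("recently
// executed.") or for an apparently earlier block ("fork."). The standalone model below captures
// that logic; the `threshold` parameter and the in-memory cell are assumptions for the example,
// not the pallet's actual offchain storage or constants.
fn check_and_update_execution_status(
	last_run: &mut Option<u64>,
	now: u64,
	threshold: u64,
) -> Result<(), &'static str> {
	match *last_run {
		Some(stored) if now < stored => Err("fork."),
		Some(stored) if now - stored < threshold => Err("recently executed."),
		_ => {
			*last_run = Some(now);
			Ok(())
		}
	}
}

#[test]
fn execution_status_guard_sketch() {
	let mut last_run = None;
	// First run at block 12 is allowed and recorded.
	assert_eq!(check_and_update_execution_status(&mut last_run, 12, 5), Ok(()));
	// Running again right afterwards is refused.
	assert_eq!(check_and_update_execution_status(&mut last_run, 13, 5), Err("recently executed."));
	// Re-executing earlier blocks (a fork-like situation) is refused as well.
	assert_eq!(check_and_update_execution_status(&mut last_run, 10, 5), Err("fork."));
	assert_eq!(check_and_update_execution_status(&mut last_run, 12, 5), Err("recently executed."));
}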
- let last_slash = ::SlashingSpans::get(&11).unwrap().last_nonzero_slash(); + let last_slash = ::SlashingSpans::get(&11) + .unwrap() + .last_nonzero_slash(); assert!(nominations.submitted_in < last_slash); + + // actually re-bond the slashed validator + assert_ok!(Staking::validate(Origin::signed(10), Default::default())); + + mock::start_era(2); + let exposure_11 = Staking::eras_stakers(active_era(), &11); + let exposure_21 = Staking::eras_stakers(active_era(), &21); + + // 10 is re-elected, but without the support of 100 + assert_eq!(exposure_11.total, 900); + + // 20 is re-elected, with the (almost) entire support of 100 + assert_eq!(exposure_21.total, 1000 + 500 - nominator_slash_amount_11); }); } @@ -2804,7 +3897,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { let total_payout_0 = current_total_payout_for_duration(3000); assert!(total_payout_0 > 10); // Test is meaningful if reward something - start_era(1); + mock::start_era(1); >::reward_by_ids(vec![(11, 1)]); // Change total issuance in order to modify total payout @@ -2814,7 +3907,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { assert!(total_payout_1 > 10); // Test is meaningful if reward something assert!(total_payout_1 != total_payout_0); - start_era(2); + mock::start_era(2); >::reward_by_ids(vec![(11, 1)]); // Change total issuance in order to modify total payout @@ -2825,7 +3918,7 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { assert!(total_payout_2 != total_payout_0); assert!(total_payout_2 != total_payout_1); - start_era(Staking::history_depth() + 1); + mock::start_era(Staking::history_depth() + 1); let active_era = Staking::active_era().unwrap().index; @@ -2835,41 +3928,23 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { // Last kept is 1: assert!(current_era - Staking::history_depth() == 1); assert_noop!( - Staking::payout_validator(Origin::signed(10), 0), + Staking::payout_stakers(Origin::signed(1337), 11, 0), // Fail: Era out of history Error::::InvalidEraToReward ); - assert_ok!(Staking::payout_validator(Origin::signed(10), 1)); - assert_ok!(Staking::payout_validator(Origin::signed(10), 2)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 2)); assert_noop!( - Staking::payout_validator(Origin::signed(10), 2), + Staking::payout_stakers(Origin::signed(1337), 11, 2), // Fail: Double claim - Error::::InvalidEraToReward + Error::::AlreadyClaimed ); assert_noop!( - Staking::payout_validator(Origin::signed(10), active_era), + Staking::payout_stakers(Origin::signed(1337), 11, active_era), // Fail: Era not finished yet Error::::InvalidEraToReward ); - assert_noop!( - Staking::payout_nominator(Origin::signed(100), 0, vec![(11, 0)]), - // Fail: Era out of history - Error::::InvalidEraToReward - ); - assert_ok!(Staking::payout_nominator(Origin::signed(100), 1, vec![(11, 0)])); - assert_ok!(Staking::payout_nominator(Origin::signed(100), 2, vec![(11, 0)])); - assert_noop!( - Staking::payout_nominator(Origin::signed(100), 2, vec![(11, 0)]), - // Fail: Double claim - Error::::InvalidEraToReward - ); - assert_noop!( - Staking::payout_nominator(Origin::signed(100), active_era, vec![(11, 0)]), - // Fail: Era not finished yet - Error::::InvalidEraToReward - ); - // Era 0 can't be rewarded anymore and current era can't be rewarded yet // only era 1 and 2 can be rewarded. 
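// Illustrative sketch: in
// `slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator` above,
// nominator 101 is exposed with 125 behind validator 11, so the 10% slash on 11 costs it
// 125 / 10 = 12 (integer maths), while its 375 exposure behind validator 21 is untouched. The
// helper below models that per-exposure proportionality; the names are assumptions made for the
// example.
fn nominator_slash(nominated_value: u64, slash_percent: u64) -> u64 {
	nominated_value * slash_percent / 100
}

#[test]
fn nominator_slash_sketch() {
	// 10% of the 125 exposed behind the offending validator; other exposures stay whole.
	assert_eq!(nominator_slash(125, 10), 12);
}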
@@ -2887,11 +3962,11 @@ fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { #[test] fn zero_slash_keeps_nominators() { ExtBuilder::default().build().execute_with(|| { - start_era(1); + mock::start_era(1); assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + let exposure = Staking::eras_stakers(active_era(), 11); assert_eq!(Balances::free_balance(101), 2000); on_offence_now( @@ -2909,7 +3984,7 @@ fn zero_slash_keeps_nominators() { // This is the best way to check that the validator was chilled; `get` will // return default value. - for (stash, _) in ::Validators::enumerate() { + for (stash, _) in ::Validators::iter() { assert!(stash != 11); } @@ -3004,10 +4079,6 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( mock::start_era(2); mock::make_all_reward_payment(1); - // nominator 10_000 can't get its reward because exposure is clipped. However it will try - // to query other people reward. - assert_ok!(Staking::payout_nominator(Origin::signed(20_000), 1, vec![(11, 0)])); - // Assert only nominators from 1 to Max are rewarded for i in 0..=::MaxNominatorRewardedPerValidator::get() { let stash = 10_000 + i as u64; @@ -3024,7 +4095,7 @@ fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward( #[test] fn set_history_depth_works() { ExtBuilder::default().build().execute_with(|| { - start_era(10); + mock::start_era(10); Staking::set_history_depth(Origin::ROOT, 20).unwrap(); assert!(::ErasTotalStake::contains_key(10 - 4)); assert!(::ErasTotalStake::contains_key(10 - 5)); @@ -3039,3 +4110,630 @@ fn set_history_depth_works() { assert!(!::ErasTotalStake::contains_key(10 - 5)); }); } + +#[test] +fn test_payout_stakers() { + // Here we will test validator can set `max_nominators_payout` and it works. + // We also test that `payout_extra_nominators` works. + ExtBuilder::default().has_stakers(false).build().execute_with(|| { + let balance = 1000; + // Create three validators: + bond_validator(11, 10, balance); // Default(64) + + // Create nominators, targeting stash of validators + for i in 0..100 { + bond_nominator(1000 + i, 100 + i, balance + i, vec![11]); + } + + mock::start_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(2); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + + // Top 64 nominators of validator 11 automatically paid out, including the validator + // Validator payout goes to controller. 
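// Illustrative sketch: the comment just above relies on only the top
// `MaxNominatorRewardedPerValidator` (64 here) nominators of a validator, ranked by stake, being
// paid automatically, which is why the bottom 36 of the 100 nominators see no balance change. The
// standalone model below shows that clipping; the `(who, value)` tuple representation is an
// assumption made for the example.
fn clip_to_top_nominators(mut exposures: Vec<(u64, u64)>, max_rewarded: usize) -> Vec<(u64, u64)> {
	// Keep the `max_rewarded` largest stakes; ties are broken arbitrarily in this sketch.
	exposures.sort_by(|a, b| b.1.cmp(&a.1));
	exposures.truncate(max_rewarded);
	exposures
}

#[test]
fn clip_to_top_nominators_sketch() {
	// Nominators with stakes 1000, 1001, ..., 1099, mirroring the setup above.
	let exposures: Vec<(u64, u64)> = (0..100).map(|i| (100 + i, 1000 + i)).collect();
	let kept = clip_to_top_nominators(exposures, 64);
	assert_eq!(kept.len(), 64);
	// Everyone kept has at least the 64th-largest stake, i.e. the bottom 36 are dropped.
	assert!(kept.iter().all(|(_, value)| *value >= 1000 + 36));
}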
+ assert!(Balances::free_balance(&10) > balance); + for i in 36..100 { + assert!(Balances::free_balance(&(100 + i)) > balance + i); + } + // The bottom 36 do not + for i in 0..36 { + assert_eq!(Balances::free_balance(&(100 + i)), balance + i); + } + + // We track rewards in `claimed_rewards` vec + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![1] }) + ); + + for i in 3..16 { + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(i); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, i - 1)); + } + + // We track rewards in `claimed_rewards` vec + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: (1..=14).collect() }) + ); + + for i in 16..100 { + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(i); + } + + // We clean it up as history passes + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![15, 98] }) + ); + + // Out of order claims works. + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 69)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 23)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 42)); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![15, 23, 42, 69, 98] }) + ); + }); +} + +#[test] +fn payout_stakers_handles_basic_errors() { + // Here we will test payouts handle all errors. + ExtBuilder::default().has_stakers(false).build().execute_with(|| { + // Same setup as the test above + let balance = 1000; + bond_validator(11, 10, balance); // Default(64) + + // Create nominators, targeting stash + for i in 0..100 { + bond_nominator(1000 + i, 100 + i, balance + i, vec![11]); + } + + mock::start_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(2); + + // Wrong Era, too big + assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 2), Error::::InvalidEraToReward); + // Wrong Staker + assert_noop!(Staking::payout_stakers(Origin::signed(1337), 10, 1), Error::::NotStash); + + for i in 3..100 { + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(i); + } + // We are at era 99, with history depth of 84 + // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. 
+ assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 14), Error::::InvalidEraToReward); + assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 99), Error::::InvalidEraToReward); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); + + // Can't claim again + assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 15), Error::::AlreadyClaimed); + assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 98), Error::::AlreadyClaimed); + }); +} + +#[test] +fn bond_during_era_correctly_populates_claimed_rewards() { + ExtBuilder::default().has_stakers(false).build().execute_with(|| { + // Era = None + bond_validator(9, 8, 1000); + assert_eq!( + Staking::ledger(&8), + Some(StakingLedger { + stash: 9, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + mock::start_era(5); + bond_validator(11, 10, 1000); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: (0..5).collect(), + }) + ); + mock::start_era(99); + bond_validator(13, 12, 1000); + assert_eq!( + Staking::ledger(&12), + Some(StakingLedger { + stash: 13, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: (15..99).collect(), + }) + ); + }); +} + +/* These migration tests below can be removed once migration code is removed */ + +#[test] +fn assert_migration_is_noop() { + let kusama_active_era = "4a0200000190e2721171010000"; + let era = ActiveEraInfo::decode(&mut &hex::decode(kusama_active_era).unwrap()[..]).unwrap(); + assert_eq!(era.index, 586); + assert_eq!(era.start, Some(1585135674000)); +} + +#[test] +fn test_last_reward_migration() { + use sp_storage::Storage; + + let mut s = Storage::default(); + + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] + struct OldStakingLedger { + pub stash: AccountId, + #[codec(compact)] + pub total: Balance, + #[codec(compact)] + pub active: Balance, + pub unlocking: Vec>, + pub last_reward: Option, + } + + let old_staking10 = OldStakingLedger:: { + stash: 0, + total: 10, + active: 10, + unlocking: vec![UnlockChunk{ value: 1234, era: 56}], + last_reward: Some(8), + }; + + let old_staking11 = OldStakingLedger:: { + stash: 1, + total: 0, + active: 0, + unlocking: vec![], + last_reward: None, + }; + + let old_staking12 = OldStakingLedger:: { + stash: 2, + total: 100, + active: 100, + unlocking: vec![UnlockChunk{ value: 9876, era: 54}, UnlockChunk{ value: 98, era: 76}], + last_reward: Some(23), + }; + + let old_staking13 = OldStakingLedger:: { + stash: 3, + total: 100, + active: 100, + unlocking: vec![], + last_reward: Some(23), + }; + + let data = vec![ + ( + Ledger::::hashed_key_for(10), + old_staking10.encode().to_vec() + ), + ( + Ledger::::hashed_key_for(11), + old_staking11.encode().to_vec() + ), + ( + Ledger::::hashed_key_for(12), + old_staking12.encode().to_vec() + ), + ( + Ledger::::hashed_key_for(13), + old_staking13.encode().to_vec() + ), + ]; + + s.top = data.into_iter().collect(); + sp_io::TestExternalities::new(s).execute_with(|| { + HistoryDepth::put(84); + CurrentEra::put(99); + let nominations = Nominations:: { + targets: vec![], + submitted_in: 0, + suppressed: false + }; + Nominators::::insert(3, nominations); + Bonded::::insert(3, 13); + Staking::migrate_last_reward_to_claimed_rewards(); + // Test staker out of range + assert_eq!( + Ledger::::get(10), + Some(StakingLedger { + stash: 0, + total: 10, + active: 10, + 
unlocking: vec![UnlockChunk{ value: 1234, era: 56}], + claimed_rewards: vec![], + }) + ); + // Test staker none + assert_eq!( + Ledger::::get(11), + Some(StakingLedger { + stash: 1, + total: 0, + active: 0, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + // Test staker migration + assert_eq!( + Ledger::::get(12), + Some(StakingLedger { + stash: 2, + total: 100, + active: 100, + unlocking: vec![UnlockChunk{ value: 9876, era: 54}, UnlockChunk{ value: 98, era: 76}], + claimed_rewards: vec![15,16,17,18,19,20,21,22,23], + }) + ); + // Test nominator migration + assert_eq!( + Ledger::::get(13), + Some(StakingLedger { + stash: 3, + total: 100, + active: 100, + unlocking: vec![], + claimed_rewards: vec![15,16,17,18,19,20,21,22,23], + }) + ); + }); +} + +#[test] +fn rewards_should_work_before_migration() { + // should check that before migration: + // * rewards get recorded per session + // * rewards get paid per Era + // * Check that nominators are also rewarded + ExtBuilder::default().nominate(true).build().execute_with(|| { + MigrateEra::put(10); + let init_balance_10 = Balances::total_balance(&10); + let init_balance_11 = Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + // This is the second validator of the current elected set. + >::reward_by_ids(vec![(21, 50)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_session(1); + + assert_eq!(Balances::total_balance(&10), init_balance_10); + assert_eq!(Balances::total_balance(&11), init_balance_11); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { + total: 50*3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + }); + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); + let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); + + start_session(2); + start_session(3); + + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment_before_migration(0); + + assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * total_payout_0*2/3, 2); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * total_payout_0 
* 2/3 + + part_for_100_from_20 * total_payout_0 * 1/3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + >::reward_by_ids(vec![(11, 1)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_1 > 10); // Test is meaningful if reward something + + mock::start_era(2); + mock::make_all_reward_payment_before_migration(1); + + assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1/3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + }); +} + +#[test] +fn migrate_era_should_work() { + // should check that before and after migration: + // * rewards get recorded per session + // * rewards get paid per Era + // * Check that nominators are also rewarded + ExtBuilder::default().nominate(true).build().execute_with(|| { + MigrateEra::put(1); + let init_balance_10 = Balances::total_balance(&10); + let init_balance_11 = Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + // This is the second validator of the current elected set. 
+ >::reward_by_ids(vec![(21, 50)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_session(1); + + assert_eq!(Balances::total_balance(&10), init_balance_10); + assert_eq!(Balances::total_balance(&11), init_balance_11); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { + total: 50*3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + }); + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); + let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); + + start_session(2); + start_session(3); + + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment_before_migration(0); + + assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * total_payout_0*2/3, 2); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2/3 + + part_for_100_from_20 * total_payout_0 * 1/3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + >::reward_by_ids(vec![(11, 1)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_1 > 10); // Test is meaningful if reward something + + mock::start_era(2); + mock::make_all_reward_payment(1); + + assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1/3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + }); +} + +#[test] +#[should_panic] +fn migrate_era_should_handle_error() { + ExtBuilder::default().nominate(true).build().execute_with(|| { + MigrateEra::put(1); + let init_balance_10 = Balances::total_balance(&10); + let init_balance_11 = Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); + + // Check state + 
Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + // This is the second validator of the current elected set. + >::reward_by_ids(vec![(21, 50)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_session(1); + + assert_eq!(Balances::total_balance(&10), init_balance_10); + assert_eq!(Balances::total_balance(&11), init_balance_11); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { + total: 50*3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + }); + + start_session(2); + start_session(3); + + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment(0); + }); +} + +#[test] +#[should_panic] +fn migrate_era_should_handle_errors_2() { + // should check that before and after migration: + // * rewards get recorded per session + // * rewards get paid per Era + // * Check that nominators are also rewarded + ExtBuilder::default().nominate(true).build().execute_with(|| { + MigrateEra::put(1); + let init_balance_10 = Balances::total_balance(&10); + let init_balance_11 = Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + // This is the second validator of the current elected set. 
+ >::reward_by_ids(vec![(21, 50)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_session(1); + + assert_eq!(Balances::total_balance(&10), init_balance_10); + assert_eq!(Balances::total_balance(&11), init_balance_11); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { + total: 50*3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + }); + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); + let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); + + start_session(2); + start_session(3); + + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment_before_migration(0); + + assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * total_payout_0*2/3, 2); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2/3 + + part_for_100_from_20 * total_payout_0 * 1/3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + >::reward_by_ids(vec![(11, 1)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_1 > 10); // Test is meaningful if reward something + + mock::start_era(2); + mock::make_all_reward_payment_before_migration(1); + + assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1/3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + }); +} diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index 8d645df85cb9457934099f2f05029b64ecee4e30..4216b94ec12f12ccf8514698a78a25e3e9752ac6 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-sudo" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,15 +10,15 @@ description = "FRAME pallet for sudo" [dependencies] serde = { version = "1.0.101", 
optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } [features] default = ["std"] @@ -31,3 +31,6 @@ std = [ "frame-support/std", "frame-system/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index 8ee09ba223ab1a393fadb9ff41056dbac21c756b..3f2eacdbf8139a83530fcf7536b6ad981e6af58f 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -58,6 +58,7 @@ //! //! decl_module! { //! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = frame_support::weights::SimpleDispatchInfo::default()] //! pub fn privileged_function(origin) -> dispatch::DispatchResult { //! ensure_root(origin)?; //! @@ -86,7 +87,7 @@ #![cfg_attr(not(feature = "std"), no_std)] use sp_std::prelude::*; -use sp_runtime::{traits::{StaticLookup, Dispatchable}, DispatchError}; +use sp_runtime::traits::{StaticLookup, Dispatchable}; use frame_support::{ Parameter, decl_module, decl_event, decl_storage, decl_error, ensure, @@ -120,7 +121,7 @@ decl_module! { /// - Weight of derivative `call` execution + 10,000. /// # #[weight = FunctionOf( - |args: (&Box<::Call>,)| args.0.get_dispatch_info().weight + 10_000, + |args: (&Box<::Call>,)| args.0.get_dispatch_info().weight + 10_000, |args: (&Box<::Call>,)| args.0.get_dispatch_info().class, true )] @@ -132,7 +133,6 @@ decl_module! { let res = match call.dispatch(frame_system::RawOrigin::Root.into()) { Ok(_) => true, Err(e) => { - let e: DispatchError = e.into(); sp_runtime::print(e); false } @@ -150,6 +150,7 @@ decl_module! { /// - Limited storage reads. /// - One DB change. /// # + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn set_key(origin, new: ::Source) { // This is a public call, so we ensure that the origin is some signed account. let sender = ensure_signed(origin)?; @@ -174,7 +175,7 @@ decl_module! { #[weight = FunctionOf( |args: (&::Source, &Box<::Call>,)| { args.1.get_dispatch_info().weight + 10_000 - }, + }, |args: (&::Source, &Box<::Call>,)| { args.1.get_dispatch_info().class }, @@ -190,7 +191,6 @@ decl_module! 
{ let res = match call.dispatch(frame_system::RawOrigin::Signed(who).into()) { Ok(_) => true, Err(e) => { - let e: DispatchError = e.into(); sp_runtime::print(e); false } diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index 78a7642d9f107cc576f95eb4e2a644e29c78c60c..3bad72a115716b6f5c1f6297bf62ef40d24674a2 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,25 +11,25 @@ description = "Support code for the runtime." [dependencies] log = "0.4" serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -frame-metadata = { version = "11.0.0-alpha.2", default-features = false, path = "../metadata" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-arithmetic = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/arithmetic" } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -frame-support-procedural = { version = "2.0.0-alpha.2", path = "./procedural" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +frame-metadata = { version = "11.0.0-alpha.5", default-features = false, path = "../metadata" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-arithmetic = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/arithmetic" } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +frame-support-procedural = { version = "2.0.0-alpha.5", path = "./procedural" } paste = "0.1.6" once_cell = { version = "1", default-features = false, optional = true } -sp-state-machine = { version = "0.8.0-alpha.2", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-alpha.5", optional = true, path = "../../primitives/state-machine" } bitmask = { version = "0.5.0", default-features = false } impl-trait-for-tuples = "0.1.3" tracing = { version = "0.1.10", optional = true } [dev-dependencies] pretty_assertions = "0.6.1" -frame-system = { version = "2.0.0-alpha.2", path = "../system" } +frame-system = { version = "2.0.0-alpha.5", path = "../system" } [features] default = ["std"] @@ -49,3 +49,7 @@ std = [ ] nightly = [] strict = [] +runtime-benchmarks = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 8d8ecb18a761b94c22d16267531cb328cf7b7a35..2f7450e4b86d3ea960a9b9b2e1e3b2c32b1af306 100644 --- 
a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,7 +12,10 @@ description = "Proc macro of Support code for the runtime." proc-macro = true [dependencies] -frame-support-procedural-tools = { version = "2.0.0-alpha.2", path = "./tools" } +frame-support-procedural-tools = { version = "2.0.0-alpha.5", path = "./tools" } proc-macro2 = "1.0.6" quote = "1.0.3" syn = { version = "1.0.7", features = ["full"] } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index 942a47a533e5f894425d9eb60def32c51ed73ff6..b74a27e7ba936c982512438fc47d5ab7dd5dc379 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -89,7 +89,7 @@ fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result Result( diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index f4b82a09ccb09b2a857effab09f1a298dc5b8fb8..a9662f530a5983438807ea47ef8827e80ef6032a 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -33,8 +33,8 @@ use proc_macro::TokenStream; /// decl_storage! { /// trait Store for Module as Example { /// Foo get(fn foo) config(): u32=12; -/// Bar: map hasher(blake2_256) u32 => u32; -/// pub Zed build(|config| vec![(0, 0)]): linked_map hasher(blake2_256) u32 => u32; +/// Bar: map hasher(identity) u32 => u32; +/// pub Zed build(|config| vec![(0, 0)]): map hasher(identity) u32 => u32; /// } /// } /// ``` @@ -70,10 +70,28 @@ use proc_macro::TokenStream; /// And [`StoragePrefixedMap`](../frame_support/storage/trait.StoragePrefixedMap.html). /// /// `$hash` representing a choice of hashing algorithms available in the -/// [`Hashable`](../frame_support/trait.Hashable.html) trait. -/// -/// `blake2_256` and `blake2_128_concat` are strong hasher. One should use another hasher -/// with care, see generator documentation. +/// [`Hashable`](../frame_support/trait.Hashable.html) trait. You will generally want to use one +/// of three hashers: +/// * `blake2_128_concat`: The default, safe choice. Use if you are unsure or don't care. It is +/// secure against user-tainted keys, fairly fast and memory-efficient and supports +/// iteration over its keys and values. This must be used if the keys of your map can be +/// selected *en masse* by untrusted users. +/// * `twox_64_concat`: This is an insecure hasher and can only be used safely if you know that +/// the preimages cannot be chosen at will by untrusted users. It is memory-efficient, extremely +/// performant and supports iteration over its keys and values. You can safely use this is the +/// key is: +/// - A (slowly) incrementing index. +/// - Known to be the result of a cryptographic hash (though `identity` is a better choice here). +/// - Known to be the public key of a cryptographic key pair in existence. +/// * `identity`: This is not a hasher at all, and just uses the key material directly. Since it +/// does no hashing or appending, it's the fastest possible hasher, however, it's also the least +/// secure. It can be used only if you know that the key will be cryptographically/securely +/// randomly distributed over the binary encoding space. 
In most cases this will not be true. +/// One case where it is true, however, is where the key is itself the result of a cryptographic +/// hash of some existing data. +/// +/// Other hashers will tend to be "opaque" and not support iteration over the keys in the +/// map. It is not recommended to use these. /// /// The generator is implemented with: /// * `module_prefix`: $module_prefix @@ -85,36 +103,6 @@ use proc_macro::TokenStream; /// twox128(module_prefix) ++ twox128(storage_prefix) ++ hasher(encode(key)) /// ``` /// -/// * Linked map: `Foo: linked_map hasher($hash) type => type`: Implements the -/// [`StorageLinkedMap`](../frame_support/storage/trait.StorageLinkedMap.html) trait using the -/// [`StorageLinkedMap generator`](../frame_support/storage/generator/trait.StorageLinkedMap.html). -/// And [`StoragePrefixedMap`](../frame_support/storage/trait.StoragePrefixedMap.html). -/// -/// `$hash` representing a choice of hashing algorithms available in the -/// [`Hashable`](../frame_support/trait.Hashable.html) trait. -/// -/// `blake2_256` and `blake2_128_concat` are strong hasher. One should use another hasher -/// with care, see generator documentation. -/// -/// All key formatting logic can be accessed in a type-agnostic format via the -/// `KeyFormat` trait, which -/// is implemented for the storage linked map type as well. -/// -/// The generator key format is implemented with: -/// * `module_prefix`: $module_prefix -/// * `storage_prefix`: storage_name -/// * `head_prefix`: `"HeadOf" ++ storage_name` -/// * `Hasher`: $hash -/// -/// Thus the keys are stored at: -/// ```nocompile -/// Twox128(module_prefix) ++ Twox128(storage_prefix) ++ Hasher(encode(key)) -/// ``` -/// and head is stored at: -/// ```nocompile -/// Twox128(module_prefix) ++ Twox128(head_prefix) -/// ``` -/// /// * Double map: `Foo: double_map hasher($hash1) u32, hasher($hash2) u32 => u32`: Implements the /// [`StorageDoubleMap`](../frame_support/storage/trait.StorageDoubleMap.html) trait using the /// [`StorageDoubleMap generator`](../frame_support/storage/generator/trait.StorageDoubleMap.html). @@ -124,14 +112,6 @@ use proc_macro::TokenStream; /// [`Hashable`](../frame_support/trait.Hashable.html) trait. They must be chosen with care, see /// generator documentation. /// -/// If the first key is untrusted, a cryptographic `hasher` such as `blake2_256` or -/// `blake2_128_concat` must be used. -/// Otherwise, other values of all storage items can be compromised. -/// -/// If the second key is untrusted, a cryptographic `hasher` such as `blake2_256` or -/// `blake2_128_concat` must be used. -/// Otherwise, other items in storage with the same first key can be compromised. -/// /// The generator is implemented with: /// * `module_prefix`: $module_prefix /// * `storage_prefix`: storage_name @@ -145,10 +125,16 @@ use proc_macro::TokenStream; /// /// Supported hashers (ordered from least to best security): /// -/// * `twox_64_concat` - TwoX with 64bit + key concatenated. +/// * `identity` - Just the unrefined key material. Use only when it is known to be a secure hash +/// already. The most efficient and iterable over keys. +/// * `twox_64_concat` - TwoX with 64bit + key concatenated. Use only when an untrusted source +/// cannot select and insert key values. Very efficient and iterable over keys. +/// * `blake2_128_concat` - Blake2 with 128bit + key concatenated. Slower but safe to use in all +/// circumstances. Iterable over keys.
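To make the guidance above concrete: the `_concat` hashers stay iterable because the final storage key is the hash of the encoded key followed by the encoded key itself, so an iterator can strip the fixed-size prefix and decode the original key, whereas the opaque hashers keep only the hash. A hedged sketch of that key layout, using `std`'s `DefaultHasher` as a stand-in for the real TwoX/Blake2 implementations:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

/// Stand-in 8-byte hash; Substrate actually uses TwoX64/Blake2-128 here.
fn toy_hash(data: &[u8]) -> [u8; 8] {
    let mut h = DefaultHasher::new();
    data.hash(&mut h);
    h.finish().to_le_bytes()
}

/// `twox_64_concat`-style key layout: fixed-size hash prefix ++ raw encoded key.
fn concat_key(encoded_key: &[u8]) -> Vec<u8> {
    let mut out = toy_hash(encoded_key).to_vec();
    out.extend_from_slice(encoded_key);
    out
}

fn main() {
    let encoded = 42u32.to_le_bytes();
    let storage_key = concat_key(&encoded);
    // The original key material survives after the prefix, which is what makes key
    // iteration possible. `identity` keeps only the raw key; the opaque hashers keep
    // only the hash, so their keys cannot be recovered by iteration.
    assert_eq!(&storage_key[8..], &encoded[..]);
}
```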
+/// +/// Deprecated hashers, which do not support iteration over keys include: /// * `twox_128` - TwoX with 128bit. /// * `twox_256` - TwoX with with 256bit. -/// * `blake2_128_concat` - Blake2 with 128bit + key concatenated. /// * `blake2_128` - Blake2 with 128bit. /// * `blake2_256` - Blake2 with 256bit. /// diff --git a/frame/support/procedural/src/storage/genesis_config/builder_def.rs b/frame/support/procedural/src/storage/genesis_config/builder_def.rs index 59e436880728fb5b36d1982113d1fe498d4ba0e1..87255ee481b37b8debf9d2ff25b55775e8fe46c2 100644 --- a/frame/support/procedural/src/storage/genesis_config/builder_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/builder_def.rs @@ -88,7 +88,7 @@ impl BuilderDef { }} }, StorageLineTypeDef::Simple(_) => unreachable!(), - StorageLineTypeDef::Map(map) | StorageLineTypeDef::LinkedMap(map) => { + StorageLineTypeDef::Map(map) => { let key = &map.key; quote!{{ #data diff --git a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs index ae7ffa64bf5d5b5de1a390f20ce6e35aed1f6434..9b6ddc92178bd6f981fde39c47933412c565c31e 100644 --- a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs @@ -93,7 +93,7 @@ impl GenesisConfigDef { let typ = match &line.storage_type { StorageLineTypeDef::Simple(_) => (*value_type).clone(), - StorageLineTypeDef::Map(map) | StorageLineTypeDef::LinkedMap(map) => { + StorageLineTypeDef::Map(map) => { let key = &map.key; parse_quote!( Vec<(#key, #value_type)> ) }, diff --git a/frame/support/procedural/src/storage/genesis_config/mod.rs b/frame/support/procedural/src/storage/genesis_config/mod.rs index 09afcb9a60d78ae263c162ac011819c746ff9c20..eeeca150d9b9b8419922f73f1ef51ed3ce46e095 100644 --- a/frame/support/procedural/src/storage/genesis_config/mod.rs +++ b/frame/support/procedural/src/storage/genesis_config/mod.rs @@ -66,6 +66,7 @@ fn decl_genesis_config_and_impl_default( let genesis_where_clause = &genesis_config.genesis_where_clause; quote!( + /// Genesis config for the module, allow to build genesis storage. #[derive(#scrate::Serialize, #scrate::Deserialize)] #[cfg(feature = "std")] #[serde(rename_all = "camelCase")] @@ -138,6 +139,7 @@ fn impl_build_storage( quote!{ #[cfg(feature = "std")] impl#genesis_impl GenesisConfig#genesis_struct #genesis_where_clause { + /// Build the storage for this module. 
pub fn build_storage #fn_generic (&self) -> std::result::Result< #scrate::sp_runtime::Storage, String diff --git a/frame/support/procedural/src/storage/getters.rs b/frame/support/procedural/src/storage/getters.rs index 34d182d7f4d7bff8999682085e1621c6204031e5..ae0e646fcd73aaf0d398d82755372dbfe9736da6 100644 --- a/frame/support/procedural/src/storage/getters.rs +++ b/frame/support/procedural/src/storage/getters.rs @@ -40,7 +40,7 @@ pub fn impl_getters(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStrea } } }, - StorageLineTypeDef::Map(map) | StorageLineTypeDef::LinkedMap(map) => { + StorageLineTypeDef::Map(map) => { let key = &map.key; let value = &map.value; quote!{ diff --git a/frame/support/procedural/src/storage/metadata.rs b/frame/support/procedural/src/storage/metadata.rs index f8dfb10d2edb26aae92675700dc38aefd0c5c2f0..bb23c99d9df5160bc3f09d780c7960bcf9c0dfa2 100644 --- a/frame/support/procedural/src/storage/metadata.rs +++ b/frame/support/procedural/src/storage/metadata.rs @@ -41,20 +41,7 @@ fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> hasher: #scrate::metadata::#hasher, key: #scrate::metadata::DecodeDifferent::Encode(#key), value: #scrate::metadata::DecodeDifferent::Encode(#value_type), - is_linked: false, - } - } - }, - StorageLineTypeDef::LinkedMap(map) => { - let hasher = map.hasher.into_metadata(); - let key = &map.key; - let key = clean_type_string("e!(#key).to_string()); - quote!{ - #scrate::metadata::StorageEntryType::Map { - hasher: #scrate::metadata::#hasher, - key: #scrate::metadata::DecodeDifferent::Encode(#key), - value: #scrate::metadata::DecodeDifferent::Encode(#value_type), - is_linked: true, + unused: false, } } }, diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index 3345c5a123c18ef0cc7f2bbd9ee0ade0b5b9daf2..e8599c52a907173f5c19f1359bc43205667a749e 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -235,10 +235,6 @@ impl StorageLineDefExt { ext::type_contains_ident(&map.key, &def.module_runtime_generic) || ext::type_contains_ident(&map.value, &def.module_runtime_generic) } - StorageLineTypeDef::LinkedMap(map) => { - ext::type_contains_ident(&map.key, &def.module_runtime_generic) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - } StorageLineTypeDef::DoubleMap(map) => { ext::type_contains_ident(&map.key1, &def.module_runtime_generic) || ext::type_contains_ident(&map.key2, &def.module_runtime_generic) @@ -249,7 +245,6 @@ impl StorageLineDefExt { let query_type = match &storage_def.storage_type { StorageLineTypeDef::Simple(value) => value.clone(), StorageLineTypeDef::Map(map) => map.value.clone(), - StorageLineTypeDef::LinkedMap(map) => map.value.clone(), StorageLineTypeDef::DoubleMap(map) => map.value.clone(), }; let is_option = ext::extract_type_option(&query_type).is_some(); @@ -291,10 +286,6 @@ impl StorageLineDefExt { let key = &map.key; quote!( StorageMap<#key, #value_type> ) }, - StorageLineTypeDef::LinkedMap(map) => { - let key = &map.key; - quote!( StorageLinkedMap<#key, #value_type> ) - }, StorageLineTypeDef::DoubleMap(map) => { let key1 = &map.key1; let key2 = &map.key2; @@ -336,7 +327,6 @@ impl StorageLineDefExt { pub enum StorageLineTypeDef { Map(MapDef), - LinkedMap(MapDef), DoubleMap(DoubleMapDef), Simple(syn::Type), } @@ -372,6 +362,7 @@ pub enum HasherKind { Twox256, Twox128, Twox64Concat, + Identity, } impl HasherKind { @@ -383,6 +374,7 @@ impl HasherKind { 
HasherKind::Twox256 => quote!( Twox256 ), HasherKind::Twox128 => quote!( Twox128 ), HasherKind::Twox64Concat => quote!( Twox64Concat ), + HasherKind::Identity => quote!( Identity ), } } @@ -394,6 +386,7 @@ impl HasherKind { HasherKind::Twox256 => quote!( StorageHasher::Twox256 ), HasherKind::Twox128 => quote!( StorageHasher::Twox128 ), HasherKind::Twox64Concat => quote!( StorageHasher::Twox64Concat ), + HasherKind::Identity => quote!( StorageHasher::Identity ), } } } @@ -420,7 +413,6 @@ pub fn decl_storage_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStr use #scrate::{ StorageValue as _, StorageMap as _, - StorageLinkedMap as _, StorageDoubleMap as _, StoragePrefixedMap as _, }; diff --git a/frame/support/procedural/src/storage/parse.rs b/frame/support/procedural/src/storage/parse.rs index b503c2b81caea22255287666a74aaeb931c76c30..af568c78cc6b72f3f71a9827ef4c260a11851425 100644 --- a/frame/support/procedural/src/storage/parse.rs +++ b/frame/support/procedural/src/storage/parse.rs @@ -27,15 +27,18 @@ mod keyword { syn::custom_keyword!(build); syn::custom_keyword!(get); syn::custom_keyword!(map); - syn::custom_keyword!(linked_map); syn::custom_keyword!(double_map); - syn::custom_keyword!(blake2_256); - syn::custom_keyword!(blake2_128); + syn::custom_keyword!(opaque_blake2_256); + syn::custom_keyword!(opaque_blake2_128); syn::custom_keyword!(blake2_128_concat); - syn::custom_keyword!(twox_256); - syn::custom_keyword!(twox_128); + syn::custom_keyword!(opaque_twox_256); + syn::custom_keyword!(opaque_twox_128); syn::custom_keyword!(twox_64_concat); + syn::custom_keyword!(identity); syn::custom_keyword!(hasher); + syn::custom_keyword!(tainted); + syn::custom_keyword!(natural); + syn::custom_keyword!(prehashed); } /// Specific `Opt` to implement structure with optional parsing @@ -194,7 +197,6 @@ impl_parse_for_opt!(DeclStorageBuild => keyword::build); #[derive(ToTokens, Debug)] enum DeclStorageType { Map(DeclStorageMap), - LinkedMap(DeclStorageLinkedMap), DoubleMap(DeclStorageDoubleMap), Simple(syn::Type), } @@ -203,8 +205,6 @@ impl syn::parse::Parse for DeclStorageType { fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { if input.peek(keyword::map) { Ok(Self::Map(input.parse()?)) - } else if input.peek(keyword::linked_map) { - Ok(Self::LinkedMap(input.parse()?)) } else if input.peek(keyword::double_map) { Ok(Self::DoubleMap(input.parse()?)) } else { @@ -222,15 +222,6 @@ struct DeclStorageMap { pub value: syn::Type, } -#[derive(Parse, ToTokens, Debug)] -struct DeclStorageLinkedMap { - pub map_keyword: keyword::linked_map, - pub hasher: Opt, - pub key: syn::Type, - pub ass_keyword: Token![=>], - pub value: syn::Type, -} - #[derive(Parse, ToTokens, Debug)] struct DeclStorageDoubleMap { pub map_keyword: keyword::double_map, @@ -245,29 +236,38 @@ struct DeclStorageDoubleMap { #[derive(ToTokens, Debug)] enum Hasher { - Blake2_256(keyword::blake2_256), - Blake2_128(keyword::blake2_128), + Blake2_256(keyword::opaque_blake2_256), + Blake2_128(keyword::opaque_blake2_128), Blake2_128Concat(keyword::blake2_128_concat), - Twox256(keyword::twox_256), - Twox128(keyword::twox_128), + Twox256(keyword::opaque_twox_256), + Twox128(keyword::opaque_twox_128), Twox64Concat(keyword::twox_64_concat), + Identity(keyword::identity), } impl syn::parse::Parse for Hasher { fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { let lookahead = input.lookahead1(); - if lookahead.peek(keyword::blake2_256) { + if lookahead.peek(keyword::opaque_blake2_256) { 
Ok(Self::Blake2_256(input.parse()?)) - } else if lookahead.peek(keyword::blake2_128) { + } else if lookahead.peek(keyword::opaque_blake2_128) { Ok(Self::Blake2_128(input.parse()?)) } else if lookahead.peek(keyword::blake2_128_concat) { Ok(Self::Blake2_128Concat(input.parse()?)) - } else if lookahead.peek(keyword::twox_256) { + } else if lookahead.peek(keyword::opaque_twox_256) { Ok(Self::Twox256(input.parse()?)) - } else if lookahead.peek(keyword::twox_128) { + } else if lookahead.peek(keyword::opaque_twox_128) { Ok(Self::Twox128(input.parse()?)) } else if lookahead.peek(keyword::twox_64_concat) { Ok(Self::Twox64Concat(input.parse()?)) + } else if lookahead.peek(keyword::identity) { + Ok(Self::Identity(input.parse()?)) + } else if lookahead.peek(keyword::tainted) { + Ok(Self::Blake2_128Concat(input.parse()?)) + } else if lookahead.peek(keyword::natural) { + Ok(Self::Twox64Concat(input.parse()?)) + } else if lookahead.peek(keyword::prehashed) { + Ok(Self::Identity(input.parse()?)) } else { Err(lookahead.error()) } @@ -313,6 +313,7 @@ impl From for super::HasherKind { Hasher::Twox256(_) => super::HasherKind::Twox256, Hasher::Twox128(_) => super::HasherKind::Twox128, Hasher::Twox64Concat(_) => super::HasherKind::Twox64Concat, + Hasher::Identity(_) => super::HasherKind::Identity, } } } @@ -464,7 +465,7 @@ fn parse_storage_line_defs( let span = line.storage_type.span(); let no_hasher_error = || syn::Error::new( span, - "Default hasher has been removed, use explicit hasher(blake2_256) instead." + "Default hasher has been removed, use explicit hasher(blake2_128_concat) instead." ); let storage_type = match line.storage_type { @@ -475,13 +476,6 @@ fn parse_storage_line_defs( value: map.value, } ), - DeclStorageType::LinkedMap(map) => super::StorageLineTypeDef::LinkedMap( - super::MapDef { - hasher: map.hasher.inner.ok_or_else(no_hasher_error)?.into(), - key: map.key, - value: map.value, - } - ), DeclStorageType::DoubleMap(map) => super::StorageLineTypeDef::DoubleMap( super::DoubleMapDef { hasher1: map.hasher1.inner.ok_or_else(no_hasher_error)?.into(), diff --git a/frame/support/procedural/src/storage/storage_struct.rs b/frame/support/procedural/src/storage/storage_struct.rs index 91bdf7bf87c808ffd3451e29447b2944841aeef0..cbd477354e820946a9f3f1692d70c3a7f2a52ca5 100644 --- a/frame/support/procedural/src/storage/storage_struct.rs +++ b/frame/support/procedural/src/storage/storage_struct.rs @@ -158,47 +158,6 @@ pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStre } ) }, - StorageLineTypeDef::LinkedMap(map) => { - let hasher = map.hasher.to_storage_hasher_struct(); - - let head_prefix_str = syn::LitStr::new( - &format!("HeadOf{}", line.name.to_string()), - line.name.span(), - ); - - quote!( - impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct - #optional_storage_where_clause - { - type Query = #query_type; - type KeyFormat = Self; - - fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { - #from_optional_value_to_query - } - - fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { - #from_query_to_optional_value - } - } - - impl<#impl_trait> #scrate::storage::generator::LinkedMapKeyFormat for #storage_struct { - type Hasher = #scrate::#hasher; - - fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_str.as_bytes() - } - - fn head_prefix() -> &'static [u8] { - #head_prefix_str.as_bytes() - } - } - ) - }, 
StorageLineTypeDef::DoubleMap(map) => { let hasher1 = map.hasher1.to_storage_hasher_struct(); let hasher2 = map.hasher2.to_storage_hasher_struct(); diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 52773f6fbeea5a235c1b816494057774a0045dfa..f199f1245db0499aa90425ac6da3966fa2132201 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,8 +9,11 @@ repository = "https://github.com/paritytech/substrate/" description = "Proc macro helpers for procedural macros" [dependencies] -frame-support-procedural-tools-derive = { version = "2.0.0-alpha.2", path = "./derive" } +frame-support-procedural-tools-derive = { version = "2.0.0-alpha.5", path = "./derive" } proc-macro2 = "1.0.6" quote = "1.0.3" syn = { version = "1.0.7", features = ["full", "visit"] } proc-macro-crate = "0.1.4" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index 6bed290c7dede02e1e12827d87a2d254d5360013..bf6346ab1b67a63c57fb021550dec9a8fda0420a 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-support-procedural-tools-derive" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -15,3 +15,6 @@ proc-macro = true proc-macro2 = "1.0.6" quote = { version = "1.0.3", features = ["proc-macro"] } syn = { version = "1.0.7", features = ["proc-macro" ,"full", "extra-traits", "parsing"] } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index a9c48097ad64ab6ecb28a10c7f7c66dd7b30cf18..aadcec67a3606e0d801efb60965cc27d87624201 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -25,18 +25,35 @@ pub use frame_metadata::{ }; pub use crate::weights::{ SimpleDispatchInfo, GetDispatchInfo, DispatchInfo, WeighData, ClassifyDispatch, - TransactionPriority, Weight, WeighBlock, PaysFee, + TransactionPriority, Weight, PaysFee, PostDispatchInfo, WithPostDispatchInfo, }; -pub use sp_runtime::{traits::Dispatchable, DispatchError, DispatchResult}; +pub use sp_runtime::{traits::Dispatchable, DispatchError}; pub use crate::traits::{CallMetadata, GetCallMetadata, GetCallName}; +/// The return typ of a `Dispatchable` in frame. When returned explicitly from +/// a dispatchable function it allows overriding the default `PostDispatchInfo` +/// returned from a dispatch. +pub type DispatchResultWithPostInfo = + sp_runtime::DispatchResultWithInfo; + +/// Unaugmented version of `DispatchResultWithPostInfo` that can be returned from +/// dispatchable functions and is automatically converted to the augmented type. Should be +/// used whenever the `PostDispatchInfo` does not need to be overwritten. As this should +/// be the common case it is the implicit return type when none is specified. +pub type DispatchResult = Result<(), sp_runtime::DispatchError>; + +/// The error type contained in a `DispatchResultWithPostInfo`. +pub type DispatchErrorWithPostInfo = + sp_runtime::DispatchErrorWithPostInfo; + + /// A type that cannot be instantiated. 
pub enum Never {} /// Serializable version of Dispatchable. /// This value can be used as a "function" in an extrinsic. pub trait Callable { - type Call: Dispatchable + Codec + Clone + PartialEq + Eq; + type Call: Dispatchable + Codec + Clone + PartialEq + Eq; } // dirty hack to work around serde_derive issue @@ -57,12 +74,14 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; +/// # use frame_support::weights::SimpleDispatchInfo; /// # use frame_system::{self as system, Trait, ensure_signed}; /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { /// /// // Private functions are dispatchable, but not available to other /// // FRAME pallets. +/// #[weight = SimpleDispatchInfo::default()] /// fn my_function(origin, var: u64) -> dispatch::DispatchResult { /// // Your implementation /// Ok(()) @@ -70,6 +89,7 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// /// // Public functions are both dispatchable and available to other /// // FRAME pallets. +/// #[weight = SimpleDispatchInfo::default()] /// pub fn my_public_function(origin) -> dispatch::DispatchResult { /// // Your implementation /// Ok(()) @@ -97,15 +117,17 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; +/// # use frame_support::weights::SimpleDispatchInfo; /// # use frame_system::{self as system, Trait, ensure_signed}; /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { -/// +/// #[weight = SimpleDispatchInfo::default()] /// fn my_long_function(origin) -> dispatch::DispatchResult { /// // Your implementation /// Ok(()) /// } /// +/// #[weight = SimpleDispatchInfo::default()] /// fn my_short_function(origin) { /// // Your implementation /// } @@ -114,6 +136,44 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # fn main() {} /// ``` /// +/// ### Consuming only portions of the annotated static weight +/// +/// Per default a callable function consumes all of its static weight as declared via +/// the #[weight] attribute. However, there are use cases where only a portion of this +/// weight should be consumed. In that case the static weight is charged pre dispatch and +/// the difference is refunded post dispatch. +/// +/// In order to make use of this feature the function must return `DispatchResultWithPostInfo` +/// in place of the default `DispatchResult`. Then the actually consumed weight can be returned. +/// To consume a non default weight while returning an error +/// [`WithPostDispatchInfo::with_weight`](./weight/trait.WithPostDispatchInfo.html) can be used +/// to augment any error with custom weight information. +/// +/// ``` +/// # #[macro_use] +/// # extern crate frame_support; +/// # use frame_support::dispatch::{DispatchResultWithPostInfo, WithPostDispatchInfo}; +/// # use frame_support::weights::SimpleDispatchInfo; +/// # use frame_system::{self as system, Trait, ensure_signed}; +/// decl_module! { +/// pub struct Module for enum Call where origin: T::Origin { +/// #[weight = SimpleDispatchInfo::FixedNormal(1_000_000)] +/// fn my_long_function(origin, do_expensive_calc: bool) -> DispatchResultWithPostInfo { +/// ensure_signed(origin).map_err(|e| e.with_weight(100_000))?; +/// if do_expensive_calc { +/// // do the expensive calculation +/// // ... 
+/// // return None to indicate that we are using all weight (the default) +/// return Ok(None.into()); +/// } +/// // expensive calculation not executed: use only a portion of the weight +/// Ok(Some(100_000).into()) +/// } +/// } +/// } +/// # fn main() {} +/// ``` +/// /// ### Privileged Function Example /// /// A privileged function checks that the origin of the call is `ROOT`. @@ -122,9 +182,11 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// # #[macro_use] /// # extern crate frame_support; /// # use frame_support::dispatch; +/// # use frame_support::weights::SimpleDispatchInfo; /// # use frame_system::{self as system, Trait, ensure_signed, ensure_root}; /// decl_module! { /// pub struct Module for enum Call where origin: T::Origin { +/// #[weight = SimpleDispatchInfo::default()] /// fn my_privileged_function(origin) -> dispatch::DispatchResult { /// ensure_root(origin)?; /// // Your implementation @@ -200,13 +262,23 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// is a runtime upgrade. This allows each module to upgrade its storage before the storage items are used. /// As such, **calling other modules must be avoided**!! Using this function will implement the /// [`OnRuntimeUpgrade`](../sp_runtime/traits/trait.OnRuntimeUpgrade.html) trait. +/// Function signature must be `fn on_runtime_upgrade() -> frame_support::weights::Weight`. +/// /// * `on_initialize`: Executes at the beginning of a block. Using this function will -/// implement the [`OnInitialize`](../sp_runtime/traits/trait.OnInitialize.html) trait. +/// implement the [`OnInitialize`](./trait.OnInitialize.html) trait. +/// Function signature can be either: +/// * `fn on_initialize(n: BlockNumber) -> frame_support::weights::Weight` or +/// * `fn on_initialize() -> frame_support::weights::Weight` +/// /// * `on_finalize`: Executes at the end of a block. Using this function will -/// implement the [`OnFinalize`](../sp_runtime/traits/trait.OnFinalize.html) trait. +/// implement the [`OnFinalize`](./traits/trait.OnFinalize.html) trait. +/// Function signature can be either: +/// * `fn on_finalize(n: BlockNumber) -> frame_support::weights::Weight` or +/// * `fn on_finalize() -> frame_support::weights::Weight` +/// /// * `offchain_worker`: Executes at the beginning of a block and produces extrinsics for a future block /// upon completion. Using this function will implement the -/// [`OffchainWorker`](../sp_runtime/traits/trait.OffchainWorker.html) trait. +/// [`OffchainWorker`](./traits/trait.OffchainWorker.html) trait. #[macro_export] macro_rules! decl_module { // Entry point #1. @@ -327,7 +399,7 @@ macro_rules! decl_module { "`deposit_event` function is reserved and must follow the syntax: `$vis:vis fn deposit_event() = default;`" ); }; - // Add on_finalize, without a given weight. + // Add on_finalize (@normalize $(#[$attr:meta])* pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> @@ -354,7 +426,6 @@ macro_rules! decl_module { { $( $on_initialize )* } { $( $on_runtime_upgrade )* } { - #[weight = $crate::dispatch::SimpleDispatchInfo::zero()] fn on_finalize( $( $param_name : $param ),* ) { $( $impl )* } } { $( $offchain )* } @@ -364,7 +435,7 @@ macro_rules! decl_module { $($rest)* ); }; - // Add on_finalize, given weight. + // compile_error on_finalize, given weight removed syntax. 
(@normalize $(#[$attr:meta])* pub struct $mod_type:ident<$trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)?> @@ -383,26 +454,12 @@ macro_rules! decl_module { fn on_finalize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } $($rest:tt)* ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { - #[weight = $weight] - fn on_finalize( $( $param_name : $param ),* ) { $( $impl )* } - } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - [ $( $dispatchables )* ] - $($rest)* + compile_error!( + "`on_finalize` can't be given weight attribute anymore, weight must be returned by \ + `on_initialize` or `on_runtime_upgrade` instead" ); }; - // Add on_runtime_upgrade, without a given weight. + // compile_error on_runtime_upgrade, without a given weight removed syntax. (@normalize $(#[$attr:meta])* pub struct $mod_type:ident< @@ -422,26 +479,11 @@ macro_rules! decl_module { fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } $($rest:tt)* ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { - #[weight = $crate::dispatch::SimpleDispatchInfo::zero()] - fn on_runtime_upgrade( $( $param_name : $param ),* ) { $( $impl )* } - } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - [ $( $dispatchables )* ] - $($rest)* + compile_error!( + "`on_runtime_upgrade` must return Weight, signature has changed." ); }; - // Add on_runtime_upgrade, given weight. + // compile_error on_runtime_upgrade, given weight removed syntax. (@normalize $(#[$attr:meta])* pub struct $mod_type:ident< @@ -461,6 +503,31 @@ macro_rules! decl_module { #[weight = $weight:expr] fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } $($rest:tt)* + ) => { + compile_error!( + "`on_runtime_upgrade` can't be given weight attribute anymore, weight must be returned \ + by the function directly." + ); + }; + // Add on_runtime_upgrade + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? + > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + { $( $on_initialize:tt )* } + {} + { $( $on_finalize:tt )* } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } + $($rest:tt)* ) => { $crate::decl_module!(@normalize $(#[$attr])* @@ -470,8 +537,7 @@ macro_rules! 
decl_module { { $( $deposit_event )* } { $( $on_initialize )* } { - #[weight = $weight] - fn on_runtime_upgrade( $( $param_name : $param ),* ) { $( $impl )* } + fn on_runtime_upgrade( $( $param_name : $param ),* ) -> $return { $( $impl )* } } { $( $on_finalize )* } { $( $offchain )* } @@ -481,7 +547,7 @@ macro_rules! decl_module { $($rest)* ); }; - // Add on_initialize, without a given weight. + // compile_error on_initialize, without a given weight removed syntax. (@normalize $(#[$attr:meta])* pub struct $mod_type:ident< @@ -501,26 +567,11 @@ macro_rules! decl_module { fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } $($rest:tt)* ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { - #[weight = $crate::dispatch::SimpleDispatchInfo::zero()] - fn on_initialize( $( $param_name : $param ),* ) { $( $impl )* } - } - { $( $on_runtime_upgrade )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - [ $( $dispatchables )* ] - $($rest)* + compile_error!( + "`on_initialize` must return Weight, signature has changed." ); }; - // Add on_initialize, given weight. + // compile_error on_initialize, with given weight removed syntax. (@normalize $(#[$attr:meta])* pub struct $mod_type:ident< @@ -540,6 +591,31 @@ macro_rules! decl_module { #[weight = $weight:expr] fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } $($rest:tt)* + ) => { + compile_error!( + "`on_initialize` can't be given weight attribute anymore, weight must be returned \ + by the function directly." + ); + }; + // Add on_initialize + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? + > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + {} + { $( $on_runtime_upgrade:tt )* } + { $( $on_finalize:tt )* } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + fn on_initialize( $( $param_name:ident : $param:ty ),* $(,)? ) -> $return:ty { $( $impl:tt )* } + $($rest:tt)* ) => { $crate::decl_module!(@normalize $(#[$attr])* @@ -548,8 +624,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { - #[weight = $weight] - fn on_initialize( $( $param_name : $param ),* ) { $( $impl )* } + fn on_initialize( $( $param_name : $param ),* ) -> $return { $( $impl )* } } { $( $on_runtime_upgrade )* } { $( $on_finalize )* } @@ -794,27 +869,10 @@ macro_rules! decl_module { ) $( -> $result:ty )* { $( $impl:tt )* } $($rest:tt)* ) => { - $crate::decl_module!(@normalize - $(#[$attr])* - pub struct $mod_type< - $trait_instance: $trait_name$(, $instance: $instantiable $(= $module_default_instance)?)? 
- > - for enum $call_type where origin: $origin_type, system = $system - { $( $other_where_bounds )* } - { $( $deposit_event )* } - { $( $on_initialize )* } - { $( $on_runtime_upgrade )* } - { $( $on_finalize )* } - { $( $offchain )* } - { $( $constants )* } - { $( $error_type )* } - [ $( $dispatchables )* ] - $(#[doc = $doc_attr])* - #[weight = $crate::dispatch::SimpleDispatchInfo::default()] - $fn_vis fn $fn_name( - $from $(, $(#[$codec_attr])* $param_name : $param )* - ) $( -> $result )* { $( $impl )* } - $($rest)* + compile_error!(concat!( + "Missing weight for ", stringify!($ident), + ". Every dispatchable must have a #[weight] attribute." + ) ); }; // Ignore any ident which is not `origin` with type `T::Origin`. @@ -935,7 +993,7 @@ macro_rules! decl_module { $ignore:ident $mod_type:ident<$trait_instance:ident $(, $instance:ident)?> $fn_name:ident $origin:ident $system:ident [ $( $param_name:ident),* ] ) => { - <$mod_type<$trait_instance $(, $instance)?>>::$fn_name( $origin $(, $param_name )* ) + <$mod_type<$trait_instance $(, $instance)?>>::$fn_name( $origin $(, $param_name )* ).map(Into::into).map_err(Into::into) }; // no `deposit_event` function wanted @@ -965,14 +1023,13 @@ macro_rules! decl_module { (@impl_on_initialize $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } - #[weight = $weight:expr] - fn on_initialize() { $( $impl:tt )* } + fn on_initialize() -> $return:ty { $( $impl:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OnInitialize<$trait_instance::BlockNumber> + $crate::traits::OnInitialize<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_initialize(_block_number_not_used: $trait_instance::BlockNumber) { + fn on_initialize(_block_number_not_used: $trait_instance::BlockNumber) -> $return { use $crate::sp_std::if_std; if_std! { use $crate::tracing; @@ -987,14 +1044,13 @@ macro_rules! decl_module { (@impl_on_initialize $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } - #[weight = $weight:expr] - fn on_initialize($param:ident : $param_ty:ty) { $( $impl:tt )* } + fn on_initialize($param:ident : $param_ty:ty) -> $return:ty { $( $impl:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OnInitialize<$trait_instance::BlockNumber> + $crate::traits::OnInitialize<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_initialize($param: $param_ty) { + fn on_initialize($param: $param_ty) -> $return { use $crate::sp_std::if_std; if_std! { use $crate::tracing; @@ -1011,7 +1067,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OnInitialize<$trait_instance::BlockNumber> + $crate::traits::OnInitialize<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1019,14 +1075,13 @@ macro_rules! 
decl_module { (@impl_on_runtime_upgrade $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } - #[weight = $weight:expr] - fn on_runtime_upgrade() { $( $impl:tt )* } + fn on_runtime_upgrade() -> $return:ty { $( $impl:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OnRuntimeUpgrade + $crate::traits::OnRuntimeUpgrade for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { - fn on_runtime_upgrade() { + fn on_runtime_upgrade() -> $return { use $crate::sp_std::if_std; if_std! { use $crate::tracing; @@ -1043,7 +1098,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OnRuntimeUpgrade + $crate::traits::OnRuntimeUpgrade for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1052,11 +1107,10 @@ macro_rules! decl_module { (@impl_on_finalize $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } - #[weight = $weight:expr] fn on_finalize() { $( $impl:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OnFinalize<$trait_instance::BlockNumber> + $crate::traits::OnFinalize<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_finalize(_block_number_not_used: $trait_instance::BlockNumber) { @@ -1074,11 +1128,10 @@ macro_rules! decl_module { (@impl_on_finalize $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } - #[weight = $weight:expr] fn on_finalize($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OnFinalize<$trait_instance::BlockNumber> + $crate::traits::OnFinalize<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn on_finalize($param: $param_ty) { @@ -1098,57 +1151,19 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OnFinalize<$trait_instance::BlockNumber> + $crate::traits::OnFinalize<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { } }; - (@impl_block_hooks_weight - $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; - { $( $other_where_bounds:tt )* } - @runtime_upgrade $( - #[weight = $weight_runtime_update:expr] - fn on_runtime_upgrade($( $param_runtime_upgrade:ident : $param_ty_runtime_upgrade:ty )*) { $( $impl_runtime_upgrade:tt )* } - )? - @init $( - #[weight = $weight_initialize:expr] - fn on_initialize($( $param_initialize:ident : $param_ty_initialize:ty )*) { $( $impl_initialize:tt )* } - )? - @fin $( - #[weight = $weight_finalize:expr] - fn on_finalize($( $param_finalize:ident : $param_ty_finalize:ty )*) { $( $impl_finalize:tt )* } - )? 
- ) => { - impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::dispatch::WeighBlock<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where - $( $other_where_bounds )* - { - $( - fn on_runtime_upgrade() -> $crate::dispatch::Weight { - >::weigh_data(&$weight_initialize, ()) - } - )? - $( - fn on_initialize(n: $trait_instance::BlockNumber) -> $crate::dispatch::Weight { - >::weigh_data(&$weight_initialize, n) - } - )? - $( - fn on_finalize(n: $trait_instance::BlockNumber) -> $crate::dispatch::Weight { - >::weigh_data(&$weight_finalize, n) - } - )? - } - }; - (@impl_offchain $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } fn offchain_worker() { $( $impl:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OffchainWorker<$trait_instance::BlockNumber> + $crate::traits::OffchainWorker<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn offchain_worker(_block_number_not_used: $trait_instance::BlockNumber) { $( $impl )* } @@ -1161,7 +1176,7 @@ macro_rules! decl_module { fn offchain_worker($param:ident : $param_ty:ty) { $( $impl:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OffchainWorker<$trait_instance::BlockNumber> + $crate::traits::OffchainWorker<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { fn offchain_worker($param: $param_ty) { $( $impl )* } @@ -1173,7 +1188,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } ) => { impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> - $crate::sp_runtime::traits::OffchainWorker<$trait_instance::BlockNumber> + $crate::traits::OffchainWorker<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* {} }; @@ -1402,15 +1417,6 @@ macro_rules! decl_module { $( $on_finalize )* } - $crate::decl_module! { - @impl_block_hooks_weight - $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; - { $( $other_where_bounds )* } - @runtime_upgrade $( $on_runtime_upgrade )* - @init $( $on_initialize )* - @fin $( $on_finalize )* - } - $crate::decl_module! { @impl_offchain $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; @@ -1483,9 +1489,9 @@ macro_rules! decl_module { &$weight, ($( $param_name, )*) ); - $crate::dispatch::DispatchInfo { - weight, - class, + $crate::dispatch::DispatchInfo { + weight, + class, pays_fee, } }, @@ -1587,7 +1593,9 @@ macro_rules! decl_module { { type Trait = $trait_instance; type Origin = $origin_type; - fn dispatch(self, _origin: Self::Origin) -> $crate::sp_runtime::DispatchResult { + type Info = $crate::weights::DispatchInfo; + type PostInfo = $crate::weights::PostDispatchInfo; + fn dispatch(self, _origin: Self::Origin) -> $crate::dispatch::DispatchResultWithPostInfo { match self { $( $call_type::$fn_name( $( $param_name ),* ) => { @@ -1612,10 +1620,10 @@ macro_rules! decl_module { where $( $other_where_bounds )* { #[doc(hidden)] - pub fn dispatch>( + pub fn dispatch>( d: D, origin: D::Origin - ) -> $crate::sp_runtime::DispatchResult { + ) -> $crate::dispatch::DispatchResultWithPostInfo { d.dispatch(origin) } } @@ -1713,10 +1721,12 @@ macro_rules! 
impl_outer_dispatch { impl $crate::dispatch::Dispatchable for $call_type { type Origin = $origin; type Trait = $call_type; + type Info = $crate::weights::DispatchInfo; + type PostInfo = $crate::weights::PostDispatchInfo; fn dispatch( self, origin: $origin, - ) -> $crate::sp_runtime::DispatchResult { + ) -> $crate::dispatch::DispatchResultWithPostInfo { $crate::impl_outer_dispatch! { @DISPATCH_MATCH self @@ -2076,9 +2086,10 @@ macro_rules! __check_reserved_fn_name { #[allow(dead_code)] mod tests { use super::*; - use crate::sp_runtime::traits::{OnInitialize, OnFinalize, OnRuntimeUpgrade}; use crate::weights::{DispatchInfo, DispatchClass}; - use crate::traits::{CallMetadata, GetCallMetadata, GetCallName}; + use crate::traits::{ + CallMetadata, GetCallMetadata, GetCallName, OnInitialize, OnFinalize, OnRuntimeUpgrade + }; pub trait Trait: system::Trait + Sized where Self::AccountId: From { type Origin; @@ -2098,35 +2109,34 @@ mod tests { } } - struct BlockWeight; - impl> WeighData for BlockWeight { - fn weigh_data(&self, target: BlockNumber) -> Weight { - let target: u32 = target.into(); - if target % 2 == 0 { 10 } else { 0 } - } - } - decl_module! { pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { /// Hi, this is a comment. + #[weight = SimpleDispatchInfo::default()] fn aux_0(_origin) -> DispatchResult { unreachable!() } + + #[weight = SimpleDispatchInfo::default()] fn aux_1(_origin, #[compact] _data: u32,) -> DispatchResult { unreachable!() } + + #[weight = SimpleDispatchInfo::default()] fn aux_2(_origin, _data: i32, _data2: String) -> DispatchResult { unreachable!() } + #[weight = SimpleDispatchInfo::FixedNormal(3)] fn aux_3(_origin) -> DispatchResult { unreachable!() } + + #[weight = SimpleDispatchInfo::default()] fn aux_4(_origin, _data: i32) -> DispatchResult { unreachable!() } - fn aux_5(_origin, _data: i32, #[compact] _data2: u32,) -> DispatchResult { unreachable!() } - #[weight = SimpleDispatchInfo::FixedNormal(7)] - fn on_initialize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_initialize") } } - #[weight = BlockWeight] - fn on_finalize(n: T::BlockNumber) { if n.into() == 42 { panic!("on_finalize") } } - #[weight = SimpleDispatchInfo::FixedOperational(69)] - fn on_runtime_upgrade() { } - fn offchain_worker() {} + #[weight = SimpleDispatchInfo::default()] + fn aux_5(_origin, _data: i32, #[compact] _data2: u32,) -> DispatchResult { unreachable!() } #[weight = SimpleDispatchInfo::FixedOperational(5)] - fn operational(_origin,) { unreachable!() } + fn operational(_origin) { unreachable!() } + + fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } 7 } + fn on_finalize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_finalize") } } + fn on_runtime_upgrade() -> Weight { 10 } + fn offchain_worker() {} } } @@ -2254,10 +2264,15 @@ mod tests { #[test] #[should_panic(expected = "on_initialize")] - fn on_initialize_should_work() { + fn on_initialize_should_work_1() { as OnInitialize>::on_initialize(42); } + #[test] + fn on_initialize_should_work_2() { + assert_eq!( as OnInitialize>::on_initialize(10), 7); + } + #[test] #[should_panic(expected = "on_finalize")] fn on_finalize_should_work() { @@ -2266,7 +2281,7 @@ mod tests { #[test] fn on_runtime_upgrade_should_work() { - as OnRuntimeUpgrade>::on_runtime_upgrade(); + assert_eq!( as OnRuntimeUpgrade>::on_runtime_upgrade(), 10); } #[test] @@ -2288,18 +2303,6 @@ mod tests { ); } - #[test] - fn weight_for_block_hooks() { - // independent of block number - 
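As a purely illustrative sketch of what the new `DispatchResultWithPostInfo` gives a caller (the `actual_weight` field on `PostDispatchInfo` is an assumption read off the `Ok(Some(..).into())` doc example above, not quoted from this patch): the statically declared weight is known up front, and the dispatch outcome can shrink it afterwards.

```
use frame_support::dispatch::Dispatchable;
use frame_support::weights::{PostDispatchInfo, Weight};

/// Weight actually consumed by `call`, given its static #[weight] `pre`.
/// `None` in `actual_weight` is assumed to mean "charge the full static weight".
fn actually_used<C>(call: C, origin: C::Origin, pre: Weight) -> Weight
where
	C: Dispatchable<PostInfo = PostDispatchInfo>,
{
	match call.dispatch(origin) {
		Ok(post) => post.actual_weight.unwrap_or(pre),
		// Errors can also carry a custom weight (see `with_weight` above);
		// for brevity this sketch just falls back to the static weight.
		Err(_) => pre,
	}
}
```

A transaction-payment layer would charge `pre` up front and refund the difference between `pre` and the value returned here after dispatch.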
assert_eq!(>::on_initialize(0), 7); - assert_eq!(>::on_initialize(10), 7); - assert_eq!(>::on_initialize(100), 7); - - // dependent - assert_eq!(>::on_finalize(2), 10); - assert_eq!(>::on_finalize(3), 0); - } - #[test] fn call_name() { let name = Call::::aux_3().get_call_name(); diff --git a/frame/support/src/error.rs b/frame/support/src/error.rs index 3b105e979d09affe2efe0a2a5b81cf99d474201f..f619250726deda5caea1f76894d1a75793458f01 100644 --- a/frame/support/src/error.rs +++ b/frame/support/src/error.rs @@ -35,6 +35,7 @@ pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; /// /// ``` /// # use frame_support::{decl_error, decl_module}; +/// # use frame_support::weights::SimpleDispatchInfo; /// decl_error! { /// /// Errors that can occur in my module. /// pub enum MyError for Module { @@ -54,6 +55,7 @@ pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; /// pub struct Module for enum Call where origin: T::Origin { /// type Error = MyError; /// +/// #[weight = SimpleDispatchInfo::default()] /// fn do_something(origin) -> frame_support::dispatch::DispatchResult { /// Err(MyError::::YouAreNotCoolEnough.into()) /// } diff --git a/frame/support/src/event.rs b/frame/support/src/event.rs index 8b7de01159df3e1417dfdb82c08f684237442f13..1184b379f44469d72278dd1562223ef51631c6d4 100644 --- a/frame/support/src/event.rs +++ b/frame/support/src/event.rs @@ -288,6 +288,7 @@ macro_rules! __decl_generic_event { } impl<$( $generic_param ),* $(, $instance)?> RawEvent<$( $generic_param ),* $(, $instance)?> { #[allow(dead_code)] + #[doc(hidden)] pub fn metadata() -> &'static [$crate::event::EventMetadata] { $crate::__events_to_metadata!(; $( $events )* ) } diff --git a/frame/support/src/hash.rs b/frame/support/src/hash.rs index aae4aceb7fe82becfe41876eaf20d79b64df464a..693e929a309e366b0605805d1721429904d04806 100644 --- a/frame/support/src/hash.rs +++ b/frame/support/src/hash.rs @@ -28,6 +28,7 @@ pub trait Hashable: Sized { fn twox_128(&self) -> [u8; 16]; fn twox_256(&self) -> [u8; 32]; fn twox_64_concat(&self) -> Vec; + fn identity(&self) -> Vec; } impl Hashable for T { @@ -49,6 +50,7 @@ impl Hashable for T { fn twox_64_concat(&self) -> Vec { self.using_encoded(Twox64Concat::hash) } + fn identity(&self) -> Vec { self.encode() } } /// Hasher to use to hash keys to insert to storage. @@ -57,6 +59,25 @@ pub trait StorageHasher: 'static { fn hash(x: &[u8]) -> Self::Output; } +/// Hasher to use to hash keys to insert to storage. +pub trait ReversibleStorageHasher: StorageHasher { + fn reverse(x: &[u8]) -> &[u8]; +} + +/// Store the key directly. +pub struct Identity; +impl StorageHasher for Identity { + type Output = Vec; + fn hash(x: &[u8]) -> Vec { + x.to_vec() + } +} +impl ReversibleStorageHasher for Identity { + fn reverse(x: &[u8]) -> &[u8] { + x + } +} + /// Hash storage keys with `concat(twox64(key), key)` pub struct Twox64Concat; impl StorageHasher for Twox64Concat { @@ -69,6 +90,15 @@ impl StorageHasher for Twox64Concat { .collect::>() } } +impl ReversibleStorageHasher for Twox64Concat { + fn reverse(x: &[u8]) -> &[u8] { + if x.len() < 8 { + crate::debug::error!("Invalid reverse: hash length too short"); + return &[] + } + &x[8..] 
+ } +} /// Hash storage keys with `concat(blake2_128(key), key)` pub struct Blake2_128Concat; @@ -82,6 +112,15 @@ impl StorageHasher for Blake2_128Concat { .collect::>() } } +impl ReversibleStorageHasher for Blake2_128Concat { + fn reverse(x: &[u8]) -> &[u8] { + if x.len() < 16 { + crate::debug::error!("Invalid reverse: hash length too short"); + return &[] + } + &x[16..] + } +} /// Hash storage keys with blake2 128 pub struct Blake2_128; diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index 00b229750db47435ae51c16142336f02b62e6b79..d37a438fc6f8178b86470a92a20f33169ca4b0d5 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -67,11 +67,12 @@ pub mod traits; pub mod weights; pub use self::hash::{ - Twox256, Twox128, Blake2_256, Blake2_128, Twox64Concat, Blake2_128Concat, Hashable, + Twox256, Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, Hashable, StorageHasher }; pub use self::storage::{ - StorageValue, StorageMap, StorageLinkedMap, StorageDoubleMap, StoragePrefixedMap + StorageValue, StorageMap, StorageDoubleMap, StoragePrefixedMap, IterableStorageMap, + IterableStorageDoubleMap, migration }; pub use self::dispatch::{Parameter, Callable, IsSubType}; pub use sp_runtime::{self, ConsensusEngineId, print, traits::Printable}; @@ -139,6 +140,8 @@ macro_rules! ord_parameter_types { fn contains(t: &$type) -> bool { &$value == t } fn sorted_members() -> $crate::sp_std::prelude::Vec<$type> { vec![$value] } fn count() -> usize { 1 } + #[cfg(feature = "runtime-benchmarks")] + fn add(_: &$type) {} } } } @@ -208,7 +211,11 @@ macro_rules! assert_err { #[cfg(feature = "std")] macro_rules! assert_ok { ( $x:expr $(,)? ) => { - assert_eq!($x, Ok(())); + let is = $x; + match is { + Ok(_) => (), + _ => assert!(false, "Expected Ok(_). Got {:#?}", is), + } }; ( $x:expr, $y:expr $(,)? ) => { assert_eq!($x, Ok($y)); @@ -253,24 +260,24 @@ mod tests { decl_storage! 
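The `Identity` hasher and the `ReversibleStorageHasher` trait added in `hash.rs` above are what make the new map iteration work: the `..Concat` hashers keep the SCALE-encoded key after the fixed-size hash, so `reverse` can strip the prefix and hand the key material back to the decoder. A small illustrative check (assuming `ReversibleStorageHasher` is reachable through `frame_support::hash`, since only `Identity` was added to the crate-root re-exports):

```
use codec::Encode;
use frame_support::{Identity, Twox64Concat, StorageHasher};
use frame_support::hash::ReversibleStorageHasher;

fn main() {
	let key = 42u32.encode();

	// Twox64Concat stores `twox64(key) ++ key`, so reversing just drops the
	// first 8 bytes (16 for Blake2_128Concat).
	let stored = Twox64Concat::hash(&key);
	assert_eq!(Twox64Concat::reverse(&stored), &key[..]);

	// Identity stores the encoded key verbatim, and reverse is a no-op.
	assert_eq!(Identity::hash(&key), key);
	assert_eq!(Identity::reverse(&key), &key[..]);
}
```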
{ trait Store for Module as Test { pub Data get(fn data) build(|_| vec![(15u32, 42u64)]): - linked_map hasher(twox_64_concat) u32 => u64; - pub OptionLinkedMap: linked_map hasher(blake2_256) u32 => Option; + map hasher(twox_64_concat) u32 => u64; + pub OptionLinkedMap: map hasher(blake2_128_concat) u32 => Option; pub GenericData get(fn generic_data): - linked_map hasher(twox_128) T::BlockNumber => T::BlockNumber; + map hasher(identity) T::BlockNumber => T::BlockNumber; pub GenericData2 get(fn generic_data2): - linked_map hasher(blake2_256) T::BlockNumber => Option; + map hasher(blake2_128_concat) T::BlockNumber => Option; pub GetterNoFnKeyword get(no_fn): Option; pub DataDM config(test_config) build(|_| vec![(15u32, 16u32, 42u64)]): - double_map hasher(twox_64_concat) u32, hasher(blake2_256) u32 => u64; + double_map hasher(twox_64_concat) u32, hasher(blake2_128_concat) u32 => u64; pub GenericDataDM: - double_map hasher(blake2_256) T::BlockNumber, hasher(twox_128) T::BlockNumber + double_map hasher(blake2_128_concat) T::BlockNumber, hasher(identity) T::BlockNumber => T::BlockNumber; pub GenericData2DM: - double_map hasher(blake2_256) T::BlockNumber, hasher(twox_256) T::BlockNumber + double_map hasher(blake2_128_concat) T::BlockNumber, hasher(twox_64_concat) T::BlockNumber => Option; pub AppendableDM: - double_map hasher(blake2_256) u32, hasher(blake2_256) T::BlockNumber => Vec; + double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Vec; } } @@ -286,8 +293,16 @@ mod tests { type Map = Data; + trait Sorted { fn sorted(self) -> Self; } + impl Sorted for Vec { + fn sorted(mut self) -> Self { + self.sort(); + self + } + } + #[test] - fn linked_map_issue_3318() { + fn map_issue_3318() { new_test_ext().execute_with(|| { OptionLinkedMap::insert(1, 1); assert_eq!(OptionLinkedMap::get(1), Some(1)); @@ -297,31 +312,31 @@ mod tests { } #[test] - fn linked_map_swap_works() { + fn map_swap_works() { new_test_ext().execute_with(|| { OptionLinkedMap::insert(0, 0); OptionLinkedMap::insert(1, 1); OptionLinkedMap::insert(2, 2); OptionLinkedMap::insert(3, 3); - let collect = || OptionLinkedMap::enumerate().collect::>(); - assert_eq!(collect(), vec![(3, 3), (2, 2), (1, 1), (0, 0)]); + let collect = || OptionLinkedMap::iter().collect::>().sorted(); + assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); // Two existing OptionLinkedMap::swap(1, 2); - assert_eq!(collect(), vec![(3, 3), (2, 1), (1, 2), (0, 0)]); + assert_eq!(collect(), vec![(0, 0), (1, 2), (2, 1), (3, 3)]); // Back to normal OptionLinkedMap::swap(2, 1); - assert_eq!(collect(), vec![(3, 3), (2, 2), (1, 1), (0, 0)]); + assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); // Left existing OptionLinkedMap::swap(2, 5); - assert_eq!(collect(), vec![(5, 2), (3, 3), (1, 1), (0, 0)]); + assert_eq!(collect(), vec![(0, 0), (1, 1), (3, 3), (5, 2)]); // Right existing OptionLinkedMap::swap(5, 2); - assert_eq!(collect(), vec![(2, 2), (3, 3), (1, 1), (0, 0)]); + assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); }); } @@ -356,7 +371,7 @@ mod tests { } #[test] - fn linked_map_basic_insert_remove_should_work() { + fn map_basic_insert_remove_should_work() { new_test_ext().execute_with(|| { // initialized during genesis assert_eq!(Map::get(&15u32), 42u64); @@ -382,54 +397,45 @@ mod tests { } #[test] - fn linked_map_enumeration_and_head_should_work() { + fn map_iteration_should_work() { new_test_ext().execute_with(|| { - assert_eq!(Map::head(), Some(15)); - assert_eq!(Map::enumerate().collect::>(), vec![(15, 
42)]); + assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42)]); // insert / remove let key = 17u32; Map::insert(key, 4u64); - assert_eq!(Map::head(), Some(key)); - assert_eq!(Map::enumerate().collect::>(), vec![(key, 4), (15, 42)]); + assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42), (key, 4)]); assert_eq!(Map::take(&15), 42u64); assert_eq!(Map::take(&key), 4u64); - assert_eq!(Map::head(), None); - assert_eq!(Map::enumerate().collect::>(), vec![]); + assert_eq!(Map::iter().collect::>().sorted(), vec![]); // Add couple of more elements Map::insert(key, 42u64); - assert_eq!(Map::head(), Some(key)); - assert_eq!(Map::enumerate().collect::>(), vec![(key, 42)]); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key, 42)]); Map::insert(key + 1, 43u64); - assert_eq!(Map::head(), Some(key + 1)); - assert_eq!(Map::enumerate().collect::>(), vec![(key + 1, 43), (key, 42)]); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key, 42), (key + 1, 43)]); // mutate let key = key + 2; Map::mutate(&key, |val| { *val = 15; }); - assert_eq!(Map::enumerate().collect::>(), vec![(key, 15), (key - 1, 43), (key - 2, 42)]); - assert_eq!(Map::head(), Some(key)); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43), (key, 15)]); Map::mutate(&key, |val| { *val = 17; }); - assert_eq!(Map::enumerate().collect::>(), vec![(key, 17), (key - 1, 43), (key - 2, 42)]); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43), (key, 17)]); // remove first Map::remove(&key); - assert_eq!(Map::head(), Some(key - 1)); - assert_eq!(Map::enumerate().collect::>(), vec![(key - 1, 43), (key - 2, 42)]); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43)]); // remove last from the list Map::remove(&(key - 2)); - assert_eq!(Map::head(), Some(key - 1)); - assert_eq!(Map::enumerate().collect::>(), vec![(key - 1, 43)]); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 1, 43)]); // remove the last element Map::remove(&(key - 1)); - assert_eq!(Map::head(), None); - assert_eq!(Map::enumerate().collect::>(), vec![]); + assert_eq!(Map::iter().collect::>().sorted(), vec![]); }); } @@ -498,7 +504,7 @@ mod tests { hasher: StorageHasher::Twox64Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("u64"), - is_linked: true, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructData(PhantomData::)) @@ -509,10 +515,10 @@ mod tests { name: DecodeDifferent::Encode("OptionLinkedMap"), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("u32"), - is_linked: true, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructOptionLinkedMap(PhantomData::)) @@ -523,10 +529,10 @@ mod tests { name: DecodeDifferent::Encode("GenericData"), modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map{ - hasher: StorageHasher::Twox128, + hasher: StorageHasher::Identity, key: DecodeDifferent::Encode("T::BlockNumber"), value: DecodeDifferent::Encode("T::BlockNumber"), - is_linked: true + unused: false }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructGenericData(PhantomData::)) @@ -537,10 +543,10 @@ mod tests { name: DecodeDifferent::Encode("GenericData2"), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map{ - hasher: StorageHasher::Blake2_256, + hasher: 
StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("T::BlockNumber"), value: DecodeDifferent::Encode("T::BlockNumber"), - is_linked: true + unused: false }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructGenericData2(PhantomData::)) @@ -564,7 +570,7 @@ mod tests { key1: DecodeDifferent::Encode("u32"), key2: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("u64"), - key2_hasher: StorageHasher::Blake2_256, + key2_hasher: StorageHasher::Blake2_128Concat, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructDataDM(PhantomData::)) @@ -575,11 +581,11 @@ mod tests { name: DecodeDifferent::Encode("GenericDataDM"), modifier: StorageEntryModifier::Default, ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key1: DecodeDifferent::Encode("T::BlockNumber"), key2: DecodeDifferent::Encode("T::BlockNumber"), value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: StorageHasher::Twox128, + key2_hasher: StorageHasher::Identity, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructGenericDataDM(PhantomData::)) @@ -590,11 +596,11 @@ mod tests { name: DecodeDifferent::Encode("GenericData2DM"), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key1: DecodeDifferent::Encode("T::BlockNumber"), key2: DecodeDifferent::Encode("T::BlockNumber"), value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: StorageHasher::Twox256, + key2_hasher: StorageHasher::Twox64Concat, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructGenericData2DM(PhantomData::)) @@ -605,11 +611,11 @@ mod tests { name: DecodeDifferent::Encode("AppendableDM"), modifier: StorageEntryModifier::Default, ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key1: DecodeDifferent::Encode("u32"), key2: DecodeDifferent::Encode("T::BlockNumber"), value: DecodeDifferent::Encode("Vec"), - key2_hasher: StorageHasher::Blake2_256, + key2_hasher: StorageHasher::Blake2_128Concat, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructGenericData2DM(PhantomData::)) diff --git a/frame/support/src/metadata.rs b/frame/support/src/metadata.rs index 46662e53548a51d8cda0def166d2126a7c0c2216..3d99ddaa84ff4f62fc008a34c7b8185f92b675de 100644 --- a/frame/support/src/metadata.rs +++ b/frame/support/src/metadata.rs @@ -256,9 +256,8 @@ mod tests { struct TestExtension; impl sp_runtime::traits::SignedExtension for TestExtension { type AccountId = u32; - type Call = u32; + type Call = (); type AdditionalSigned = u32; - type DispatchInfo = (); type Pre = (); const IDENTIFIER: &'static str = "testextension"; fn additional_signed(&self) -> Result { @@ -270,9 +269,8 @@ mod tests { struct TestExtension2; impl sp_runtime::traits::SignedExtension for TestExtension2 { type AccountId = u32; - type Call = u32; + type Call = (); type AdditionalSigned = u32; - type DispatchInfo = (); type Pre = (); const IDENTIFIER: &'static str = "testextension2"; fn additional_signed(&self) -> Result { @@ -336,6 +334,7 @@ mod tests { mod event_module { use crate::dispatch::DispatchResult; + use crate::weights::SimpleDispatchInfo; pub trait Trait: super::system::Trait { type Balance; @@ -353,6 +352,7 @@ mod tests { pub struct Module for enum Call where origin: T::Origin { type Error = Error; + #[weight = SimpleDispatchInfo::default()] fn aux_0(_origin) 
-> DispatchResult { unreachable!() } } } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index ff73fdbedf8f8a2ba59056029510c06258e8be66..9d05ff0b2d6d94ac1db87a730fc39015720b9938 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -16,8 +16,9 @@ use sp_std::prelude::*; use sp_std::borrow::Borrow; -use codec::{Ref, FullCodec, FullEncode, Encode, EncodeLike, EncodeAppend}; -use crate::{storage::{self, unhashed}, hash::{StorageHasher, Twox128}, traits::Len}; +use codec::{Ref, FullCodec, FullEncode, Decode, Encode, EncodeLike, EncodeAppend}; +use crate::{storage::{self, unhashed}, traits::Len}; +use crate::hash::{StorageHasher, Twox128, ReversibleStorageHasher}; /// Generator for `StorageDoubleMap` used by `decl_storage`. /// @@ -55,6 +56,22 @@ pub trait StorageDoubleMap { /// Storage prefix. Used for generating final key. fn storage_prefix() -> &'static [u8]; + /// The full prefix; just the hash of `module_prefix` concatenated to the hash of + /// `storage_prefix`. + fn prefix_hash() -> Vec { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + + let mut result = Vec::with_capacity( + module_prefix_hashed.len() + storage_prefix_hashed.len() + ); + + result.extend_from_slice(&module_prefix_hashed[..]); + result.extend_from_slice(&storage_prefix_hashed[..]); + + result + } + /// Convert an optional value retrieved from storage to the type queried. fn from_optional_value_to_query(v: Option) -> Self::Query; @@ -62,8 +79,7 @@ pub trait StorageDoubleMap { fn from_query_to_optional_value(v: Self::Query) -> Option; /// Generate the first part of the key used in top storage. - fn storage_double_map_final_key1(k1: KArg1) -> Vec - where + fn storage_double_map_final_key1(k1: KArg1) -> Vec where KArg1: EncodeLike, { let module_prefix_hashed = Twox128::hash(Self::module_prefix()); @@ -82,19 +98,32 @@ pub trait StorageDoubleMap { } /// Generate the full key used in top storage. 
- fn storage_double_map_final_key(k1: KArg1, k2: KArg2) -> Vec - where + fn storage_double_map_final_key(k1: KArg1, k2: KArg2) -> Vec where KArg1: EncodeLike, KArg2: EncodeLike, { - let mut final_key = Self::storage_double_map_final_key1(k1); - final_key.extend_from_slice(k2.using_encoded(Self::Hasher2::hash).as_ref()); + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let key1_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); + let key2_hashed = k2.borrow().using_encoded(Self::Hasher2::hash); + + let mut final_key = Vec::with_capacity( + module_prefix_hashed.len() + + storage_prefix_hashed.len() + + key1_hashed.as_ref().len() + + key2_hashed.as_ref().len() + ); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key1_hashed.as_ref()); + final_key.extend_from_slice(key2_hashed.as_ref()); + final_key } } -impl storage::StorageDoubleMap for G -where +impl storage::StorageDoubleMap for G where K1: FullEncode, K2: FullEncode, V: FullCodec, @@ -102,32 +131,28 @@ where { type Query = G::Query; - fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec - where + fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec where KArg1: EncodeLike, KArg2: EncodeLike, { Self::storage_double_map_final_key(k1, k2) } - fn contains_key(k1: KArg1, k2: KArg2) -> bool - where + fn contains_key(k1: KArg1, k2: KArg2) -> bool where KArg1: EncodeLike, KArg2: EncodeLike, { unhashed::exists(&Self::storage_double_map_final_key(k1, k2)) } - fn get(k1: KArg1, k2: KArg2) -> Self::Query - where + fn get(k1: KArg1, k2: KArg2) -> Self::Query where KArg1: EncodeLike, KArg2: EncodeLike, { G::from_optional_value_to_query(unhashed::get(&Self::storage_double_map_final_key(k1, k2))) } - fn take(k1: KArg1, k2: KArg2) -> Self::Query - where + fn take(k1: KArg1, k2: KArg2) -> Self::Query where KArg1: EncodeLike, KArg2: EncodeLike, { @@ -137,8 +162,12 @@ where G::from_optional_value_to_query(value) } - fn swap(x_k1: XKArg1, x_k2: XKArg2, y_k1: YKArg1, y_k2: YKArg2) - where + fn swap( + x_k1: XKArg1, + x_k2: XKArg2, + y_k1: YKArg1, + y_k2: YKArg2 + ) where XKArg1: EncodeLike, XKArg2: EncodeLike, YKArg1: EncodeLike, @@ -160,8 +189,7 @@ where } } - fn insert(k1: KArg1, k2: KArg2, val: VArg) - where + fn insert(k1: KArg1, k2: KArg2, val: VArg) where KArg1: EncodeLike, KArg2: EncodeLike, VArg: EncodeLike, @@ -169,8 +197,7 @@ where unhashed::put(&Self::storage_double_map_final_key(k1, k2), &val.borrow()) } - fn remove(k1: KArg1, k2: KArg2) - where + fn remove(k1: KArg1, k2: KArg2) where KArg1: EncodeLike, KArg2: EncodeLike, { @@ -181,8 +208,8 @@ where unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref()) } - fn iter_prefix(k1: KArg1) -> storage::PrefixIterator - where KArg1: ?Sized + EncodeLike + fn iter_prefix(k1: KArg1) -> storage::PrefixIterator where + KArg1: ?Sized + EncodeLike { let prefix = Self::storage_double_map_final_key1(k1); storage::PrefixIterator:: { @@ -192,8 +219,7 @@ where } } - fn mutate(k1: KArg1, k2: KArg2, f: F) -> R - where + fn mutate(k1: KArg1, k2: KArg2, f: F) -> R where KArg1: EncodeLike, KArg2: EncodeLike, F: FnOnce(&mut Self::Query) -> R, @@ -213,8 +239,7 @@ where k1: KArg1, k2: KArg2, items: Items, - ) -> Result<(), &'static str> - where + ) -> Result<(), &'static str> where KArg1: EncodeLike, KArg2: EncodeLike, Item: Encode, @@ -246,8 +271,7 @@ where k1: KArg1, k2: KArg2, items: Items, - ) - where + ) where KArg1: 
EncodeLike, KArg2: EncodeLike, Item: Encode, @@ -260,10 +284,10 @@ where .unwrap_or_else(|_| Self::insert(k1, k2, items)); } - fn decode_len(key1: KArg1, key2: KArg2) -> Result - where KArg1: EncodeLike, - KArg2: EncodeLike, - V: codec::DecodeLength + Len, + fn decode_len(key1: KArg1, key2: KArg2) -> Result where + KArg1: EncodeLike, + KArg2: EncodeLike, + V: codec::DecodeLength + Len, { let final_key = Self::storage_double_map_final_key(key1, key2); if let Some(v) = unhashed::get_raw(&final_key) { @@ -276,6 +300,135 @@ where Ok(len) } } + + fn migrate_keys< + OldHasher1: StorageHasher, + OldHasher2: StorageHasher, + KeyArg1: EncodeLike, + KeyArg2: EncodeLike, + >(key1: KeyArg1, key2: KeyArg2) -> Option { + let old_key = { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let key1_hashed = key1.borrow().using_encoded(OldHasher1::hash); + let key2_hashed = key2.borrow().using_encoded(OldHasher2::hash); + + let mut final_key = Vec::with_capacity( + module_prefix_hashed.len() + + storage_prefix_hashed.len() + + key1_hashed.as_ref().len() + + key2_hashed.as_ref().len() + ); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key1_hashed.as_ref()); + final_key.extend_from_slice(key2_hashed.as_ref()); + + final_key + }; + unhashed::take(old_key.as_ref()).map(|value| { + unhashed::put(Self::storage_double_map_final_key(key1, key2).as_ref(), &value); + value + }) + } +} + +/// Utility to iterate through items in a storage map. +pub struct MapIterator { + prefix: Vec, + previous_key: Vec, + drain: bool, + _phantom: ::sp_std::marker::PhantomData<(K, V, Hasher)>, +} + +impl< + K: Decode + Sized, + V: Decode + Sized, + Hasher: ReversibleStorageHasher +> Iterator for MapIterator { + type Item = (K, V); + + fn next(&mut self) -> Option<(K, V)> { + loop { + let maybe_next = sp_io::storage::next_key(&self.previous_key) + .filter(|n| n.starts_with(&self.prefix)); + break match maybe_next { + Some(next) => { + self.previous_key = next; + match unhashed::get::(&self.previous_key) { + Some(value) => { + if self.drain { + unhashed::kill(&self.previous_key) + } + let mut key_material = Hasher::reverse(&self.previous_key[self.prefix.len()..]); + match K::decode(&mut key_material) { + Ok(key) => Some((key, value)), + Err(_) => continue, + } + } + None => continue, + } + } + None => None, + } + } + } +} + +impl< + K1: FullCodec, + K2: FullCodec, + V: FullCodec, + G: StorageDoubleMap, +> storage::IterableStorageDoubleMap for G where + G::Hasher1: ReversibleStorageHasher, + G::Hasher2: ReversibleStorageHasher +{ + type Iterator = MapIterator; + + /// Enumerate all elements in the map. + fn iter(k1: impl EncodeLike) -> Self::Iterator { + let prefix = G::storage_double_map_final_key1(k1); + Self::Iterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + _phantom: Default::default(), + } + } + + /// Enumerate all elements in the map. 
+ fn drain(k1: impl EncodeLike) -> Self::Iterator { + let prefix = G::storage_double_map_final_key1(k1); + Self::Iterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: true, + _phantom: Default::default(), + } + } + + fn translate Option>(f: F) { + let prefix = G::prefix_hash(); + let mut previous_key = prefix.clone(); + loop { + match sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { + Some(next) => { + previous_key = next; + let maybe_value = unhashed::get::(&previous_key); + match maybe_value { + Some(value) => match f(value) { + Some(new) => unhashed::put::(&previous_key, &new), + None => unhashed::kill(&previous_key), + }, + None => continue, + } + } + None => return, + } + } + } } #[cfg(test)] diff --git a/frame/support/src/storage/generator/linked_map.rs b/frame/support/src/storage/generator/linked_map.rs deleted file mode 100644 index 2d0df8fcc810c99512b7b8c5b80bcecf07444d2b..0000000000000000000000000000000000000000 --- a/frame/support/src/storage/generator/linked_map.rs +++ /dev/null @@ -1,499 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use codec::{FullCodec, Encode, Decode, EncodeLike, Ref}; -use crate::{storage::{self, unhashed}, hash::{StorageHasher, Twox128}, traits::Len}; -use sp_std::{prelude::*, marker::PhantomData}; - -/// Generator for `StorageLinkedMap` used by `decl_storage`. -/// -/// By default final key generation rely on `KeyFormat`. -pub trait StorageLinkedMap { - /// The type that get/take returns. - type Query; - - /// The family of key formats used for this map. - type KeyFormat: KeyFormat; - - /// Convert an optional value retrieved from storage to the type queried. - fn from_optional_value_to_query(v: Option) -> Self::Query; - - /// Convert a query to an optional value into storage. - fn from_query_to_optional_value(v: Self::Query) -> Option; - - /// Generate the full key used in top storage. - fn storage_linked_map_final_key(key: KeyArg) -> Vec - where - KeyArg: EncodeLike, - { - ::storage_linked_map_final_key::(&key) - } - - /// Generate the hashed key for head - fn storage_linked_map_final_head_key() -> Vec { - ::storage_linked_map_final_head_key() - } -} - -/// A type-abstracted key format used for a family of linked-map types. 
-/// -/// # Default mapping of keys to a storage path -/// -/// The key for the head of the map is stored at one fixed path: -/// ```nocompile -/// Twox128(module_prefix) ++ Twox128(head_prefix) -/// ``` -/// -/// For each key, the value stored under that key is appended with a -/// [`Linkage`](struct.Linkage.html) (which hold previous and next key) at the path: -/// ```nocompile -/// Twox128(module_prefix) ++ Twox128(storage_prefix) ++ Hasher(encode(key)) -/// ``` -/// -/// Enumeration is done by getting the head of the linked map and then iterating getting the -/// value and linkage stored at the key until the found linkage has no next key. -/// -/// # Warning -/// -/// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` such as -/// `blake2_256` must be used. Otherwise, other values in storage can be compromised. -pub trait KeyFormat { - /// Hasher. Used for generating final key and final head key. - type Hasher: StorageHasher; - - /// Module prefix. Used for generating final key. - fn module_prefix() -> &'static [u8]; - - /// Storage prefix. Used for generating final key. - fn storage_prefix() -> &'static [u8]; - - /// Storage prefix. Used for generating final head key. - fn head_prefix() -> &'static [u8]; - - /// Generate the full key used in top storage. - fn storage_linked_map_final_key(key: &K) -> Vec - where - K: Encode, - { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - let key_hashed = key.using_encoded(Self::Hasher::hash); - - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() - ); - - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key_hashed.as_ref()); - - final_key - } - - /// Generate the full key used in top storage to store the head of the linked map. - fn storage_linked_map_final_head_key() -> Vec { - [ - Twox128::hash(Self::module_prefix()), - Twox128::hash(Self::head_prefix()), - ].concat() - } -} - -/// Linkage data of an element (it's successor and predecessor) -#[derive(Encode, Decode)] -pub struct Linkage { - /// Previous element key in storage (None for the first element) - pub previous: Option, - /// Next element key in storage (None for the last element) - pub next: Option, -} - -impl Default for Linkage { - fn default() -> Self { - Self { - previous: None, - next: None, - } - } -} - -// Encode like a linkage. -#[derive(Encode)] -struct EncodeLikeLinkage, NKey: EncodeLike, Key: Encode> { - // Previous element key in storage (None for the first element) - previous: Option, - // Next element key in storage (None for the last element) - next: Option, - // The key of the linkage this type encode to - phantom: core::marker::PhantomData, -} - -/// A key-value pair iterator for enumerable map. -pub struct Enumerator { - next: Option, - _phantom: PhantomData<(V, F)>, -} - -impl Enumerator { - /// Create an explicit enumerator for testing. 
- #[cfg(test)] - pub fn from_head(head: K) -> Self { - Enumerator { - next: Some(head), - _phantom: Default::default(), - } - } -} - -impl Iterator for Enumerator -where - K: FullCodec, - V: FullCodec, - F: KeyFormat, -{ - type Item = (K, V); - - fn next(&mut self) -> Option { - let next = self.next.take()?; - - let (val, linkage): (V, Linkage) = { - let next_full_key = F::storage_linked_map_final_key(&next); - match read_with_linkage::(next_full_key.as_ref()) { - Some(value) => value, - None => { - // TODO #3700: error should be handleable. - runtime_print!( - "ERROR: Corrupted state: linked map {:?}{:?}: \ - next value doesn't exist at {:?}", - F::module_prefix(), F::storage_prefix(), next_full_key, - ); - return None - } - } - }; - - self.next = linkage.next; - Some((next, val)) - } -} - -/// Update linkage when this element is removed. -/// -/// Takes care of updating previous and next elements points -/// as well as updates head if the element is first or last. -fn remove_linkage(linkage: Linkage) -where - K: FullCodec, - V: FullCodec, - F: KeyFormat, -{ - let next_key = linkage.next.as_ref().map(|k| F::storage_linked_map_final_key(k)); - let prev_key = linkage.previous.as_ref().map(|k| F::storage_linked_map_final_key(k)); - - if let Some(prev_key) = prev_key { - // Retrieve previous element and update `next` - if let Some(mut res) = read_with_linkage::(prev_key.as_ref()) { - res.1.next = linkage.next; - unhashed::put(prev_key.as_ref(), &res); - } else { - // TODO #3700: error should be handleable. - runtime_print!( - "ERROR: Corrupted state: linked map {:?}{:?}: \ - previous value doesn't exist at {:?}", - F::module_prefix(), F::storage_prefix(), prev_key, - ); - } - } else { - // we were first so let's update the head - write_head::<&K, K, F>(linkage.next.as_ref()); - } - if let Some(next_key) = next_key { - // Update previous of next element - if let Some(mut res) = read_with_linkage::(next_key.as_ref()) { - res.1.previous = linkage.previous; - unhashed::put(next_key.as_ref(), &res); - } else { - // TODO #3700: error should be handleable. - runtime_print!( - "ERROR: Corrupted state: linked map {:?}{:?}: \ - next value doesn't exist at {:?}", - F::module_prefix(), F::storage_prefix(), next_key, - ); - } - } -} - -/// Read the contained data and its linkage. -pub(super) fn read_with_linkage(key: &[u8]) -> Option<(V, Linkage)> -where - K: Decode, - V: Decode, -{ - unhashed::get(key) -} - -/// Generate linkage for newly inserted element. -/// -/// Takes care of updating head and previous head's pointer. -pub(super) fn new_head_linkage(key: KeyArg) -> Linkage -where - KeyArg: EncodeLike, - K: FullCodec, - V: FullCodec, - F: KeyFormat, -{ - if let Some(head) = read_head::() { - // update previous head predecessor - { - let head_key = F::storage_linked_map_final_key(&head); - if let Some((data, linkage)) = read_with_linkage::(head_key.as_ref()) { - let new_linkage = EncodeLikeLinkage::<_, _, K> { - previous: Some(Ref::from(&key)), - next: linkage.next.as_ref(), - phantom: Default::default(), - }; - unhashed::put(head_key.as_ref(), &(data, new_linkage)); - } else { - // TODO #3700: error should be handleable. 
- runtime_print!( - "ERROR: Corrupted state: linked map {:?}{:?}: \ - head value doesn't exist at {:?}", - F::module_prefix(), F::storage_prefix(), head_key, - ); - // Thus we consider we are first - update the head and produce empty linkage - - write_head::<_, _, F>(Some(key)); - return Linkage::default(); - } - } - // update to current head - write_head::<_, _, F>(Some(key)); - // return linkage with pointer to previous head - let mut linkage = Linkage::default(); - linkage.next = Some(head); - linkage - } else { - // we are first - update the head and produce empty linkage - write_head::<_, _, F>(Some(key)); - Linkage::default() - } -} - -/// Read current head pointer. -pub(crate) fn read_head() -> Option -where - K: Decode, - F: KeyFormat, -{ - unhashed::get(F::storage_linked_map_final_head_key().as_ref()) -} - -/// Overwrite current head pointer. -/// -/// If `None` is given head is removed from storage. -pub(super) fn write_head(head: Option) -where - KeyArg: EncodeLike, - K: FullCodec, - F: KeyFormat, -{ - match head.as_ref() { - Some(head) => unhashed::put(F::storage_linked_map_final_head_key().as_ref(), head), - None => unhashed::kill(F::storage_linked_map_final_head_key().as_ref()), - } -} - -impl storage::StorageLinkedMap for G -where - K: FullCodec, - V: FullCodec, - G: StorageLinkedMap, -{ - type Query = G::Query; - - type Enumerator = Enumerator; - - fn contains_key>(key: KeyArg) -> bool { - unhashed::exists(Self::storage_linked_map_final_key(key).as_ref()) - } - - fn get>(key: KeyArg) -> Self::Query { - let val = unhashed::get(Self::storage_linked_map_final_key(key).as_ref()); - G::from_optional_value_to_query(val) - } - - fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2) { - let final_key1 = Self::storage_linked_map_final_key(Ref::from(&key1)); - let final_key2 = Self::storage_linked_map_final_key(Ref::from(&key2)); - let full_value_1 = read_with_linkage::(final_key1.as_ref()); - let full_value_2 = read_with_linkage::(final_key2.as_ref()); - - match (full_value_1, full_value_2) { - // Just keep linkage in order and only swap values. - (Some((value1, linkage1)), Some((value2, linkage2))) => { - unhashed::put(final_key1.as_ref(), &(value2, linkage1)); - unhashed::put(final_key2.as_ref(), &(value1, linkage2)); - } - // Remove key and insert the new one. - (Some((value, _linkage)), None) => { - Self::remove(key1); - let linkage = new_head_linkage::<_, _, V, G::KeyFormat>(key2); - unhashed::put(final_key2.as_ref(), &(value, linkage)); - } - // Remove key and insert the new one. - (None, Some((value, _linkage))) => { - Self::remove(key2); - let linkage = new_head_linkage::<_, _, V, G::KeyFormat>(key1); - unhashed::put(final_key1.as_ref(), &(value, linkage)); - } - // No-op. 
- (None, None) => (), - } - } - - fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { - let final_key = Self::storage_linked_map_final_key(Ref::from(&key)); - let linkage = match read_with_linkage::<_, V>(final_key.as_ref()) { - // overwrite but reuse existing linkage - Some((_data, linkage)) => linkage, - // create new linkage - None => new_head_linkage::<_, _, V, G::KeyFormat>(key), - }; - unhashed::put(final_key.as_ref(), &(val, linkage)) - } - - fn remove>(key: KeyArg) { - G::take(key); - } - - fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R { - let final_key = Self::storage_linked_map_final_key(Ref::from(&key)); - - let (mut val, _linkage) = read_with_linkage::(final_key.as_ref()) - .map(|(data, linkage)| (G::from_optional_value_to_query(Some(data)), Some(linkage))) - .unwrap_or_else(|| (G::from_optional_value_to_query(None), None)); - - let ret = f(&mut val); - match G::from_query_to_optional_value(val) { - Some(ref val) => G::insert(key, val), - None => G::remove(key), - } - ret - } - - fn take>(key: KeyArg) -> Self::Query { - let final_key = Self::storage_linked_map_final_key(key); - - let full_value: Option<(V, Linkage)> = unhashed::take(final_key.as_ref()); - - let value = full_value.map(|(data, linkage)| { - remove_linkage::(linkage); - data - }); - - G::from_optional_value_to_query(value) - } - - fn enumerate() -> Self::Enumerator { - Enumerator::<_, _, G::KeyFormat> { - next: read_head::<_, G::KeyFormat>(), - _phantom: Default::default(), - } - } - - fn head() -> Option { - read_head::<_, G::KeyFormat>() - } - - fn decode_len>(key: KeyArg) -> Result - where V: codec::DecodeLength + Len - { - let key = Self::storage_linked_map_final_key(key); - if let Some(v) = unhashed::get_raw(key.as_ref()) { - ::len(&v).map_err(|e| e.what()) - } else { - let len = G::from_query_to_optional_value(G::from_optional_value_to_query(None)) - .map(|v| v.len()) - .unwrap_or(0); - - Ok(len) - } - } - - /// The translation happens in-place, new keys are inserted at the same time as old keys are - /// removed, thus new keys must not collide with still remaining old keys. - fn translate(translate_key: TK, translate_val: TV) -> Result<(), Option> - where K2: FullCodec + Clone, V2: Decode, TK: Fn(K2) -> K, TV: Fn(V2) -> V - { - let head_key = read_head::().ok_or(None)?; - - let mut last_key = None; - let mut current_key = head_key.clone(); - - write_head::<&K, K, G::KeyFormat>(Some(&translate_key(head_key))); - - let translate_linkage = |old: Linkage| -> Linkage { - Linkage { - previous: old.previous.map(&translate_key), - next: old.next.map(&translate_key), - } - }; - - loop { - let old_raw_key = G::KeyFormat::storage_linked_map_final_key(¤t_key); - let x = unhashed::take(old_raw_key.as_ref()); - let (val, linkage): (V2, Linkage) = match x { - Some(v) => v, - None => { - // we failed to read value and linkage. Update the last key's linkage - // to end the map early, since it's impossible to iterate further. - if let Some(last_key) = last_key { - let last_raw_key = G::storage_linked_map_final_key(&last_key); - if let Some((val, mut linkage)) - = read_with_linkage::(last_raw_key.as_ref()) - { - // defensive: should always happen, since it was just written - // in the last iteration of the loop. 
- linkage.next = None; - unhashed::put(last_raw_key.as_ref(), &(&val, &linkage)); - } - } - - return Err(Some(current_key)); - } - }; - let next = linkage.next.clone(); - - let val = translate_val(val); - let linkage = translate_linkage(linkage); - - // and write in the value and linkage under the new key. - let new_key = translate_key(current_key.clone()); - let new_raw_key = G::storage_linked_map_final_key(&new_key); - unhashed::put(new_raw_key.as_ref(), &(&val, &linkage)); - - match next { - None => break, - Some(next) => { - last_key = Some(new_key); - current_key = next - }, - } - } - - Ok(()) - } -} diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index 497b3fd4777d771ac053253d7639c56045c077d9..c29a9a223aacf578d162864ef9a119aa015e78bc 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -17,8 +17,9 @@ #[cfg(not(feature = "std"))] use sp_std::prelude::*; use sp_std::borrow::Borrow; -use codec::{FullCodec, FullEncode, Encode, EncodeLike, Ref, EncodeAppend}; -use crate::{storage::{self, unhashed}, hash::{StorageHasher, Twox128}, traits::Len}; +use codec::{FullCodec, FullEncode, Decode, Encode, EncodeLike, Ref, EncodeAppend}; +use crate::{storage::{self, unhashed}, traits::Len}; +use crate::hash::{StorageHasher, Twox128, ReversibleStorageHasher}; /// Generator for `StorageMap` used by `decl_storage`. /// @@ -44,6 +45,22 @@ pub trait StorageMap { /// Storage prefix. Used for generating final key. fn storage_prefix() -> &'static [u8]; + /// The full prefix; just the hash of `module_prefix` concatenated to the hash of + /// `storage_prefix`. + fn prefix_hash() -> Vec { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + + let mut result = Vec::with_capacity( + module_prefix_hashed.len() + storage_prefix_hashed.len() + ); + + result.extend_from_slice(&module_prefix_hashed[..]); + result.extend_from_slice(&storage_prefix_hashed[..]); + + result + } + /// Convert an optional value retrieved from storage to the type queried. fn from_optional_value_to_query(v: Option) -> Self::Query; @@ -51,8 +68,7 @@ pub trait StorageMap { fn from_query_to_optional_value(v: Self::Query) -> Option; /// Generate the full key used in top storage. - fn storage_map_final_key(key: KeyArg) -> Vec - where + fn storage_map_final_key(key: KeyArg) -> Vec where KeyArg: EncodeLike, { let module_prefix_hashed = Twox128::hash(Self::module_prefix()); @@ -71,6 +87,107 @@ pub trait StorageMap { } } +/// Utility to iterate through items in a storage map. 
+pub struct StorageMapIterator { + prefix: Vec, + previous_key: Vec, + drain: bool, + _phantom: ::sp_std::marker::PhantomData<(K, V, Hasher)>, +} + +impl< + K: Decode + Sized, + V: Decode + Sized, + Hasher: ReversibleStorageHasher +> Iterator for StorageMapIterator { + type Item = (K, V); + + fn next(&mut self) -> Option<(K, V)> { + loop { + let maybe_next = sp_io::storage::next_key(&self.previous_key) + .filter(|n| n.starts_with(&self.prefix)); + break match maybe_next { + Some(next) => { + self.previous_key = next; + match unhashed::get::(&self.previous_key) { + Some(value) => { + if self.drain { + unhashed::kill(&self.previous_key) + } + let mut key_material = Hasher::reverse(&self.previous_key[self.prefix.len()..]); + match K::decode(&mut key_material) { + Ok(key) => Some((key, value)), + Err(_) => continue, + } + } + None => continue, + } + } + None => None, + } + } + } +} + +impl< + K: FullCodec, + V: FullCodec, + G: StorageMap, +> storage::IterableStorageMap for G where + G::Hasher: ReversibleStorageHasher +{ + type Iterator = StorageMapIterator; + + /// Enumerate all elements in the map. + fn iter() -> Self::Iterator { + let prefix = G::prefix_hash(); + Self::Iterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + _phantom: Default::default(), + } + } + + /// Enumerate all elements in the map. + fn drain() -> Self::Iterator { + let prefix = G::prefix_hash(); + Self::Iterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: true, + _phantom: Default::default(), + } + } + + fn translate Option>(f: F) { + let prefix = G::prefix_hash(); + let mut previous_key = prefix.clone(); + loop { + match sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { + Some(next) => { + previous_key = next; + let maybe_value = unhashed::get::(&previous_key); + match maybe_value { + Some(value) => { + let mut key_material = G::Hasher::reverse(&previous_key[prefix.len()..]); + match K::decode(&mut key_material) { + Ok(key) => match f(key, value) { + Some(new) => unhashed::put::(&previous_key, &new), + None => unhashed::kill(&previous_key), + }, + Err(_) => continue, + } + } + None => continue, + } + } + None => return, + } + } + } +} + impl> storage::StorageMap for G { type Query = G::Query; @@ -228,4 +345,26 @@ impl> storage::StorageMap Ok(len) } } + + fn migrate_key>(key: KeyArg) -> Option { + let old_key = { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let key_hashed = key.borrow().using_encoded(OldHasher::hash); + + let mut final_key = Vec::with_capacity( + module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() + ); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key_hashed.as_ref()); + + final_key + }; + unhashed::take(old_key.as_ref()).map(|value| { + unhashed::put(Self::storage_map_final_key(key).as_ref(), &value); + value + }) + } } diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index 2bee76babdc40ca152b398f6aa750d976e829cb5..687d8a3c9361ba1ca217ee22947a349ae9095955 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -23,23 +23,20 @@ //! //! This is internal api and is subject to change. 
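For context on how pallet code is expected to consume the `IterableStorageMap` implementation above, here is a minimal usage sketch. It assumes a storage item declared as `Balances: map hasher(blake2_128_concat) u32 => u64` in `decl_storage!`; the name and types are illustrative only, and iteration is possible precisely because `blake2_128_concat` is a `ReversibleStorageHasher`.

    use frame_support::storage::IterableStorageMap;

    // Illustrative helper, not part of this diff: walks the hypothetical `Balances`
    // map using the new iteration API.
    fn audit_and_rewrite() -> u64 {
        // Sum every value without modifying the map.
        let total: u64 = Balances::iter().map(|(_key, value)| value).sum();

        // Rewrite every value in place; returning `None` from the closure deletes the entry.
        Balances::translate(|_key: u32, value: u64| Some(value.saturating_mul(2)));

        // Visit and remove every remaining entry in one pass; by the time a pair is
        // yielded, the underlying storage entry has already been removed.
        for (_key, _value) in Balances::drain() {}

        total
    }

As the trait documentation notes, iteration order is unspecified, and altering the map while iterating over it gives undefined results.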
-mod linked_map; mod map; mod double_map; mod value; -pub use linked_map::{StorageLinkedMap, Enumerator, Linkage, KeyFormat as LinkedMapKeyFormat}; pub use map::StorageMap; pub use double_map::StorageDoubleMap; pub use value::StorageValue; - #[cfg(test)] #[allow(dead_code)] mod tests { use sp_io::TestExternalities; - use codec::{Encode, Decode}; - use crate::storage::{unhashed, generator::{StorageValue, StorageLinkedMap}}; + use codec::Encode; + use crate::storage::{unhashed, generator::StorageValue, IterableStorageMap}; struct Runtime {} pub trait Trait { @@ -56,16 +53,10 @@ mod tests { pub struct Module for enum Call where origin: T::Origin {} } - #[derive(Encode, Decode, Clone, Debug, Eq, PartialEq)] - struct NumberNumber { - a: u32, - b: u32, - } - crate::decl_storage! { trait Store for Module as Runtime { Value get(fn value) config(): (u64, u64); - NumberMap: linked_map hasher(blake2_256) NumberNumber => u64; + NumberMap: map hasher(identity) u32 => u64; } } @@ -89,41 +80,25 @@ mod tests { } #[test] - fn linked_map_translate_works() { - use super::linked_map::{self, Enumerator, KeyFormat}; - - type Format = >::KeyFormat; - + fn map_translate_works() { let t = GenesisConfig::default().build_storage().unwrap(); TestExternalities::new(t).execute_with(|| { // start with a map of u32 -> u32. for i in 0u32..100u32 { - let final_key = ::storage_linked_map_final_key(&i); - - let linkage = linked_map::new_head_linkage::<_, u32, u32, Format>(&i); - unhashed::put(final_key.as_ref(), &(&i, linkage)); + unhashed::put(&NumberMap::hashed_key_for(&i), &(i as u64)); } - let head = linked_map::read_head::().unwrap(); - assert_eq!( - Enumerator::::from_head(head).collect::>(), - (0..100).rev().map(|x| (x, x)).collect::>(), + NumberMap::iter().collect::>(), + (0..100).map(|x| (x as u32, x as u64)).collect::>(), ); // do translation. - NumberMap::translate( - |k: u32| NumberNumber { a: k, b: k }, - |v: u32| (v as u64) << 32 | v as u64, - ).unwrap(); + NumberMap::translate(|k: u32, v: u64| if k % 2 == 0 { Some((k as u64) << 32 | v) } else { None }); - assert!(linked_map::read_head::().is_some()); assert_eq!( - NumberMap::enumerate().collect::>(), - (0..100u32).rev().map(|x| ( - NumberNumber { a: x, b: x }, - (x as u64) << 32 | x as u64, - )).collect::>(), + NumberMap::iter().collect::>(), + (0..50u32).map(|x| x * 2).map(|x| (x, (x as u64) << 32 | x as u64)).collect::>(), ); }) } diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index df9758ba69839d4d103acd477601d02c39828369..8e6beefa8886e702fbd0536bfaa75d86d5f505b9 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -19,6 +19,7 @@ use sp_std::prelude::*; use codec::{Encode, Decode}; use crate::{StorageHasher, Twox128}; +use crate::hash::ReversibleStorageHasher; /// Utility to iterate through raw items in storage. pub struct StorageIterator { @@ -78,6 +79,72 @@ impl Iterator for StorageIterator { } } +/// Utility to iterate through raw items in storage. +pub struct StorageKeyIterator { + prefix: Vec, + previous_key: Vec, + drain: bool, + _phantom: ::sp_std::marker::PhantomData<(K, T, H)>, +} + +impl StorageKeyIterator { + /// Construct iterator to iterate over map items in `module` for the map called `item`. + pub fn new(module: &[u8], item: &[u8]) -> Self { + Self::with_suffix(module, item, &[][..]) + } + + /// Construct iterator to iterate over map items in `module` for the map called `item`. 
+ pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { + let mut prefix = Vec::new(); + prefix.extend_from_slice(&Twox128::hash(module)); + prefix.extend_from_slice(&Twox128::hash(item)); + prefix.extend_from_slice(suffix); + let previous_key = prefix.clone(); + Self { prefix, previous_key, drain: false, _phantom: Default::default() } + } + + /// Mutate this iterator into a draining iterator; items iterated are removed from storage. + pub fn drain(mut self) -> Self { + self.drain = true; + self + } +} + +impl Iterator + for StorageKeyIterator +{ + type Item = (K, T); + + fn next(&mut self) -> Option<(K, T)> { + loop { + let maybe_next = sp_io::storage::next_key(&self.previous_key) + .filter(|n| n.starts_with(&self.prefix)); + break match maybe_next { + Some(next) => { + self.previous_key = next.clone(); + let mut key_material = H::reverse(&next[self.prefix.len()..]); + match K::decode(&mut key_material) { + Ok(key) => { + let maybe_value = frame_support::storage::unhashed::get::(&next); + match maybe_value { + Some(value) => { + if self.drain { + frame_support::storage::unhashed::kill(&next); + } + Some((key, value)) + } + None => continue, + } + } + Err(_) => continue, + } + } + None => None, + } + } + } +} + /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn have_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> bool { get_storage_value::<()>(module, item, hash).is_some() @@ -109,3 +176,21 @@ pub fn put_storage_value(module: &[u8], item: &[u8], hash: &[u8], val key[32..].copy_from_slice(hash); frame_support::storage::unhashed::put(&key, &value); } + +/// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. +pub fn remove_storage_prefix(module: &[u8], item: &[u8], hash: &[u8]) { + let mut key = vec![0u8; 32 + hash.len()]; + key[0..16].copy_from_slice(&Twox128::hash(module)); + key[16..32].copy_from_slice(&Twox128::hash(item)); + key[32..].copy_from_slice(hash); + frame_support::storage::unhashed::kill_prefix(&key) +} + +/// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. +pub fn take_storage_item( + module: &[u8], + item: &[u8], + key: K, +) -> Option { + take_storage_value(module, item, key.using_encoded(H::hash).as_ref()) +} diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index e5d845cb22a957a7a3d1f7f68efbdf7550c9aa55..efec36b540abcc4ec8ab695e233919eeaf2de5b2 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -202,78 +202,60 @@ pub trait StorageMap { /// function for this purpose. fn decode_len>(key: KeyArg) -> Result where V: codec::DecodeLength + Len; -} - -/// A strongly-typed linked map in storage. -/// -/// Similar to `StorageMap` but allows to enumerate other elements and doesn't implement append. -/// -/// Details on implementation can be found at -/// [`generator::StorageLinkedMap`] -pub trait StorageLinkedMap { - /// The type that get/take return. - type Query; - /// The type that iterates over all `(key, value)`. - type Enumerator: Iterator; - - /// Does the value (explicitly) exist in storage? - fn contains_key>(key: KeyArg) -> bool; - - /// Load the value associated with the given key from the map. - fn get>(key: KeyArg) -> Self::Query; + /// Migrate an item with the given `key` from a defunct `OldHasher` to the current hasher. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. 
+ fn migrate_key>(key: KeyArg) -> Option; - /// Swap the values of two keys. - fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2); + /// Migrate an item with the given `key` from a `blake2_256` hasher to the current hasher. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + fn migrate_key_from_blake>(key: KeyArg) -> Option { + Self::migrate_key::(key) + } +} - /// Store a value to be associated with the given key from the map. - fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg); +/// A strongly-typed map in storage whose keys and values can be iterated over. +pub trait IterableStorageMap: StorageMap { + /// The type that iterates over all `(key, value)`. + type Iterator: Iterator; - /// Remove the value under a key. - fn remove>(key: KeyArg); + /// Enumerate all elements in the map in no particular order. If you alter the map while doing + /// this, you'll get undefined results. + fn iter() -> Self::Iterator; - /// Mutate the value under a key. - fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R; + /// Remove all elements from the map and iterate through them in no particular order. If you + /// add elements to the map while doing this, you'll get undefined results. + fn drain() -> Self::Iterator; - /// Take the value under a key. - fn take>(key: KeyArg) -> Self::Query; + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// By returning `None` from `f` for an element, you'll remove it from the map. + fn translate Option>(f: F); +} - /// Return current head element. - fn head() -> Option; +/// A strongly-typed double map in storage whose secondary keys and values can be iterated over. +pub trait IterableStorageDoubleMap< + K1: FullCodec, + K2: FullCodec, + V: FullCodec +>: StorageDoubleMap { + /// The type that iterates over all `(key, value)`. + type Iterator: Iterator; - /// Enumerate all elements in the map. - fn enumerate() -> Self::Enumerator; + /// Enumerate all elements in the map with first key `k1` in no particular order. If you add or + /// remove values whose first key is `k1` to the map while doing this, you'll get undefined + /// results. + fn iter(k1: impl EncodeLike) -> Self::Iterator; - /// Read the length of the value in a fast way, without decoding the entire value. - /// - /// `T` is required to implement `Codec::DecodeLength`. - /// - /// Note that `0` is returned as the default value if no encoded value exists at the given key. - /// Therefore, this function cannot be used as a sign of _existence_. use the `::contains_key()` - /// function for this purpose. - fn decode_len>(key: KeyArg) -> Result - where V: codec::DecodeLength + Len; + /// Remove all elements from the map with first key `k1` and iterate through them in no + /// particular order. If you add elements with first key `k1` to the map while doing this, + /// you'll get undefined results. + fn drain(k1: impl EncodeLike) -> Self::Iterator; - /// Translate the keys and values from some previous `(K2, V2)` to the current type. - /// - /// `TK` translates keys from the old type, and `TV` translates values. - /// - /// Returns `Err` if the map could not be interpreted as the old type, and Ok if it could. - /// The `Err` contains the first key which could not be migrated, or `None` if the - /// head of the list could not be read. 
- /// - /// # Warning - /// - /// This function must be used with care, before being updated the storage still contains the - /// old type, thus other calls (such as `get`) will fail at decoding it. - /// - /// # Usage - /// - /// This would typically be called inside the module implementation of on_runtime_upgrade, while - /// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More - /// precisely prior initialized modules doesn't make use of this storage). - fn translate(translate_key: TK, translate_val: TV) -> Result<(), Option> - where K2: FullCodec + Clone, V2: Decode, TK: Fn(K2) -> K, TV: Fn(V2) -> V; + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// By returning `None` from `f` for an element, you'll remove it from the map. + fn translate Option>(f: F); } /// An implementation of a map with a two keys. @@ -377,6 +359,17 @@ pub trait StorageDoubleMap { KArg1: EncodeLike, KArg2: EncodeLike, V: codec::DecodeLength + Len; + + /// Migrate an item with the given `key1` and `key2` from defunct `OldHasher1` and + /// `OldHasher2` to the current hashers. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + fn migrate_keys< + OldHasher1: StorageHasher, + OldHasher2: StorageHasher, + KeyArg1: EncodeLike, + KeyArg2: EncodeLike, + >(key1: KeyArg1, key2: KeyArg2) -> Option; } /// Iterator for prefixed map. @@ -440,7 +433,7 @@ pub trait StoragePrefixedMap { } /// Iter over all value of the storage. - fn iter() -> PrefixIterator { + fn iter_values() -> PrefixIterator { let prefix = Self::final_prefix(); PrefixIterator { prefix: prefix.to_vec(), @@ -535,26 +528,26 @@ mod test { assert_eq!(MyStorage::final_prefix().to_vec(), k); // test iteration - assert_eq!(MyStorage::iter().collect::>(), vec![]); + assert_eq!(MyStorage::iter_values().collect::>(), vec![]); unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u64); unhashed::put(&[&k[..], &vec![1, 1][..]].concat(), &2u64); unhashed::put(&[&k[..], &vec![8][..]].concat(), &3u64); unhashed::put(&[&k[..], &vec![10][..]].concat(), &4u64); - assert_eq!(MyStorage::iter().collect::>(), vec![1, 2, 3, 4]); + assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3, 4]); // test removal MyStorage::remove_all(); - assert_eq!(MyStorage::iter().collect::>(), vec![]); + assert_eq!(MyStorage::iter_values().collect::>(), vec![]); // test migration unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u32); unhashed::put(&[&k[..], &vec![8][..]].concat(), &2u32); - assert_eq!(MyStorage::iter().collect::>(), vec![]); + assert_eq!(MyStorage::iter_values().collect::>(), vec![]); MyStorage::translate_values(|v: u32| v as u64).unwrap(); - assert_eq!(MyStorage::iter().collect::>(), vec![1, 2]); + assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2]); MyStorage::remove_all(); // test migration 2 @@ -564,9 +557,9 @@ mod test { unhashed::put(&[&k[..], &vec![10][..]].concat(), &4u32); // (contains some value that successfully decoded to u64) - assert_eq!(MyStorage::iter().collect::>(), vec![1, 2, 3]); + assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); assert_eq!(MyStorage::translate_values(|v: u128| v as u64), Err(2)); - assert_eq!(MyStorage::iter().collect::>(), vec![1, 3]); + assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 3]); MyStorage::remove_all(); // test that other values are not modified. 
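Before moving on to the trait changes in `traits.rs`, it helps to see how the two halves of this storage API combine in a pallet: `migrate_key_from_blake` moves a single entry from the old `blake2_256`-derived key to whatever hasher the map now declares, while `IterableStorageMap::translate` or the raw `StorageKeyIterator` in the migration module handle whole-map rewrites. A minimal sketch follows, assuming a pallet with `Accounts: map hasher(blake2_128_concat) T::AccountId => u64` and a known set of keys to move; all names are illustrative rather than taken from this diff, and a real migration would normally be driven from `on_runtime_upgrade`.

    use frame_support::StorageMap;

    // Hypothetical migration helper: re-hash a known list of keys in place.
    fn migrate_known_accounts<T: Trait>(known: &[T::AccountId]) {
        for who in known {
            // Reads the value stored under the old blake2_256-based key (if any),
            // removes it, and writes it back under the map's current key scheme.
            // Returns `Some(value)` if an entry was moved, `None` otherwise.
            let _moved: Option<u64> = Accounts::<T>::migrate_key_from_blake(who);
        }
    }

The double-map counterpart is `migrate_keys`, which takes the two old hashers and both keys.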
diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 2e87629a9f02e86671c5fc7d726703e15401bf83..7e2040ee234a75fbcd2b488b7ebb40c64bfbefd7 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -19,16 +19,17 @@ //! NOTE: If you're looking for `parameter_types`, it has moved in to the top-level module. use sp_std::{prelude::*, result, marker::PhantomData, ops::Div, fmt::Debug}; -use codec::{FullCodec, Codec, Encode, Decode}; +use codec::{FullCodec, Codec, Encode, Decode, EncodeLike}; use sp_core::u32_trait::Value as U32; use sp_runtime::{ - RuntimeDebug, - ConsensusEngineId, DispatchResult, DispatchError, - traits::{MaybeSerializeDeserialize, AtLeast32Bit, Saturating, TrailingZeroInput}, + RuntimeDebug, ConsensusEngineId, DispatchResult, DispatchError, traits::{ + MaybeSerializeDeserialize, AtLeast32Bit, Saturating, TrailingZeroInput, Bounded, Zero, + BadOrigin + }, }; - use crate::dispatch::Parameter; use crate::storage::StorageMap; +use impl_trait_for_tuples::impl_for_tuples; /// An abstraction of a value stored within storage, but possibly as part of a larger composite /// item. @@ -87,7 +88,7 @@ impl< Created: Happened, Removed: Happened, K: FullCodec, - T: FullCodec + T: FullCodec, > StoredMap for StorageMapShim { fn get(k: &K) -> T { S::get(k) } fn is_explicit(k: &K) -> bool { S::contains_key(k) } @@ -138,6 +139,35 @@ impl< } } +/// Something that can estimate at which block the next session rotation will happen. This should +/// be the same logical unit that dictates `ShouldEndSession` to the session module. No Assumptions +/// are made about the scheduling of the sessions. +pub trait EstimateNextSessionRotation { + /// Return the block number at which the next session rotation is estimated to happen. + /// + /// None should be returned if the estimation fails to come to an answer + fn estimate_next_session_rotation(now: BlockNumber) -> Option; +} + +impl EstimateNextSessionRotation for () { + fn estimate_next_session_rotation(_: BlockNumber) -> Option { + Default::default() + } +} + +/// Something that can estimate at which block the next `new_session` will be triggered. This must +/// always be implemented by the session module. +pub trait EstimateNextNewSession { + /// Return the block number at which the next new session is estimated to happen. + fn estimate_next_new_session(now: BlockNumber) -> Option; +} + +impl EstimateNextNewSession for () { + fn estimate_next_new_session(_: BlockNumber) -> Option { + Default::default() + } +} + /// Anything that can have a `::len()` method. pub trait Len { /// Return the length of data type. @@ -172,6 +202,13 @@ pub trait Contains { /// Get the number of items in the set. fn count() -> usize { Self::sorted_members().len() } + + /// Add an item that would satisfy `contains`. It does not make sure any other + /// state is correctly maintained or generated. + /// + /// **Should be used for benchmarking only!!!** + #[cfg(feature = "runtime-benchmarks")] + fn add(t: &T); } /// Determiner to say whether a given account is unused. @@ -187,14 +224,14 @@ impl IsDeadAccount for () { } /// Handler for when a new account has been created. -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[impl_for_tuples(30)] pub trait OnNewAccount { /// A new account `who` has been registered. fn on_new_account(who: &AccountId); } /// The account with the given id was reaped. -#[impl_trait_for_tuples::impl_for_tuples(30)] +#[impl_for_tuples(30)] pub trait OnKilledAccount { /// The account with the given id was reaped. 
fn on_killed_account(who: &AccountId); @@ -851,6 +888,12 @@ pub trait Time { fn now() -> Self::Moment; } +/// Trait to deal with unix time. +pub trait UnixTime { + /// Return duration since `SystemTime::UNIX_EPOCH`. + fn now() -> core::time::Duration; +} + impl WithdrawReasons { /// Choose all variants except for `one`. /// @@ -990,6 +1033,21 @@ impl Randomness for () { } } +/// Trait to be used by block producing consensus engine modules to determine +/// how late the current block is (e.g. in a slot-based proposal mechanism how +/// many slots were skipped since the previous block). +pub trait Lateness { + /// Returns a generic measure of how late the current block is compared to + /// its parent. + fn lateness(&self) -> N; +} + +impl Lateness for () { + fn lateness(&self) -> N { + Zero::zero() + } +} + /// Implementors of this trait provide information about whether or not some validator has /// been registered with them. The [Session module](../../pallet_session/index.html) is an implementor. pub trait ValidatorRegistration { @@ -1036,3 +1094,187 @@ pub trait GetCallMetadata { /// Return a [`CallMetadata`], containing function and pallet name of the Call. fn get_call_metadata(&self) -> CallMetadata; } + +/// The block finalization trait. Implementing this lets you express what should happen +/// for your module when the block is ending. +#[impl_for_tuples(30)] +pub trait OnFinalize { + /// The block is being finalized. Implement to have something happen. + fn on_finalize(_n: BlockNumber) {} +} + +/// The block initialization trait. Implementing this lets you express what should happen +/// for your module when the block is beginning (right before the first extrinsic is executed). +pub trait OnInitialize { + /// The block is being initialized. Implement to have something happen. + /// + /// Return the non-negotiable weight consumed in the block. + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } +} + +#[impl_for_tuples(30)] +impl OnInitialize for Tuple { + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(_n.clone())); )* ); + weight + } +} + +/// The runtime upgrade trait. Implementing this lets you express what should happen +/// when the runtime upgrades, and changes may need to occur to your module. +pub trait OnRuntimeUpgrade { + /// Perform a module upgrade. + /// + /// Return the non-negotiable weight consumed for runtime upgrade. + fn on_runtime_upgrade() -> crate::weights::Weight { 0 } +} + +#[impl_for_tuples(30)] +impl OnRuntimeUpgrade for Tuple { + fn on_runtime_upgrade() -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); + weight + } +} + +/// Off-chain computation trait. +/// +/// Implementing this trait on a module allows you to perform long-running tasks +/// that make (by default) validators generate transactions that feed results +/// of those long-running computations back on chain. +/// +/// NOTE: This function runs off-chain, so it can access the block state, +/// but cannot preform any alterations. More specifically alterations are +/// not forbidden, but they are not persisted in any way after the worker +/// has finished. +#[impl_for_tuples(30)] +pub trait OffchainWorker { + /// This function is being called after every block import (when fully synced). 
+ /// + /// Implement this and use any of the `Offchain` `sp_io` set of APIs + /// to perform off-chain computations, calls and submit transactions + /// with results to trigger any on-chain changes. + /// Any state alterations are lost and are not persisted. + fn offchain_worker(_n: BlockNumber) {} +} + +pub mod schedule { + use super::*; + + /// Information relating to the period of a scheduled task. First item is the length of the + /// period and the second is the number of times it should be executed in total before the task + /// is considered finished and removed. + pub type Period = (BlockNumber, u32); + + /// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning + /// higher priority. + pub type Priority = u8; + + /// The highest priority. We invert the value so that normal sorting will place the highest + /// priority at the beginning of the list. + pub const HIGHEST_PRORITY: Priority = 0; + /// Anything of this value or lower will definitely be scheduled on the block that they ask for, even + /// if it breaches the `MaximumWeight` limitation. + pub const HARD_DEADLINE: Priority = 63; + /// The lowest priority. Most stuff should be around here. + pub const LOWEST_PRORITY: Priority = 255; + + /// A type that can be used as a scheduler. + pub trait Anon { + /// An address which can be used for removing a scheduled task. + type Address: Codec + Clone + Eq + EncodeLike + Debug; + + /// Schedule a one-off dispatch to happen at the beginning of some block in the future. + /// + /// This is not named. + /// + /// Infallible. + fn schedule( + when: BlockNumber, + maybe_periodic: Option>, + priority: Priority, + call: Call + ) -> Self::Address; + + /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, + /// also. + /// + /// Will return an error if the `address` is invalid. + /// + /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + /// + /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For + /// that, you must name the task explicitly using the `Named` trait. + fn cancel(address: Self::Address) -> Result<(), ()>; + } + + /// A type that can be used as a scheduler. + pub trait Named { + /// An address which can be used for removing a scheduled task. + type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug; + + /// Schedule a one-off dispatch to happen at the beginning of some block in the future. + /// + /// - `id`: The identity of the task. This must be unique and will return an error if not. + fn schedule_named( + id: impl Encode, + when: BlockNumber, + maybe_periodic: Option>, + priority: Priority, + call: Call + ) -> Result; + + /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances + /// of that, also. + /// + /// Will return an error if the `id` is invalid. + /// + /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + fn cancel_named(id: impl Encode) -> Result<(), ()>; + } +} + +/// Some sort of check on the origin is performed by this object. +pub trait EnsureOrigin { + /// A return type. + type Success; + /// Perform the origin check. 
+ fn ensure_origin(o: OuterOrigin) -> result::Result { + Self::try_origin(o).map_err(|_| BadOrigin) + } + /// Perform the origin check. + fn try_origin(o: OuterOrigin) -> result::Result; + + /// Returns an outer origin capable of passing `try_origin` check. + /// + /// ** Should be used for benchmarking only!!! ** + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> OuterOrigin; +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { + struct Test; + impl OnInitialize for Test { + fn on_initialize(_n: u8) -> crate::weights::Weight { + 10 + } + } + impl OnRuntimeUpgrade for Test { + fn on_runtime_upgrade() -> crate::weights::Weight { + 20 + } + } + + assert_eq!(<(Test, Test)>::on_initialize(0), 20); + assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); + } +} diff --git a/frame/support/src/unsigned.rs b/frame/support/src/unsigned.rs index 4289e4e474f53971f29f75208d9cf865b6e173a2..3bc6f692affc20426f196792666fe1c0413c146f 100644 --- a/frame/support/src/unsigned.rs +++ b/frame/support/src/unsigned.rs @@ -18,7 +18,7 @@ pub use crate::sp_runtime::traits::ValidateUnsigned; #[doc(hidden)] pub use crate::sp_runtime::transaction_validity::{ - TransactionValidity, UnknownTransaction, TransactionValidityError, + TransactionValidity, UnknownTransaction, TransactionValidityError, TransactionSource, }; @@ -34,7 +34,8 @@ pub use crate::sp_runtime::transaction_validity::{ /// # impl frame_support::unsigned::ValidateUnsigned for Module { /// # type Call = Call; /// # -/// # fn validate_unsigned(call: &Self::Call) -> frame_support::unsigned::TransactionValidity { +/// # fn validate_unsigned(_source: frame_support::unsigned::TransactionSource, _call: &Self::Call) +/// -> frame_support::unsigned::TransactionValidity { /// # unimplemented!(); /// # } /// # } @@ -78,10 +79,14 @@ macro_rules! 
impl_outer_validate_unsigned { } } - fn validate_unsigned(call: &Self::Call) -> $crate::unsigned::TransactionValidity { + fn validate_unsigned( + #[allow(unused_variables)] + source: $crate::unsigned::TransactionSource, + call: &Self::Call, + ) -> $crate::unsigned::TransactionValidity { #[allow(unreachable_patterns)] match call { - $( Call::$module(inner_call) => $module::validate_unsigned(inner_call), )* + $( Call::$module(inner_call) => $module::validate_unsigned(source, inner_call), )* _ => $crate::unsigned::UnknownTransaction::NoUnsignedValidator.into(), } } @@ -110,7 +115,10 @@ mod test_partial_and_full_call { impl super::super::ValidateUnsigned for Module { type Call = Call; - fn validate_unsigned(_call: &Self::Call) -> super::super::TransactionValidity { + fn validate_unsigned( + _source: super::super::TransactionSource, + _call: &Self::Call + ) -> super::super::TransactionValidity { unimplemented!(); } } diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 8926ed949302b304935a5df7431eae09a886eb57..ea3368550f301b96f3dd82bc15daf2d000a5b60f 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -37,14 +37,14 @@ #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; -use impl_trait_for_tuples::impl_for_tuples; use codec::{Encode, Decode}; -use sp_arithmetic::traits::{Bounded, Zero}; +use sp_arithmetic::traits::Bounded; use sp_runtime::{ RuntimeDebug, traits::SignedExtension, generic::{CheckedExtrinsic, UncheckedExtrinsic}, }; +use crate::dispatch::{DispatchErrorWithPostInfo, DispatchError}; /// Re-export priority as type pub use sp_runtime::transaction_validity::TransactionPriority; @@ -67,17 +67,6 @@ pub trait ClassifyDispatch { fn classify_dispatch(&self, target: T) -> DispatchClass; } -/// Means of determining the weight of a block's life cycle hooks: `on_initialize`, `on_finalize`, -/// `on_runtime_upgrade`, and such. -pub trait WeighBlock { - /// Return the weight of the block's on_runtime_upgrade hook. - fn on_runtime_upgrade() -> Weight { Zero::zero() } - /// Return the weight of the block's on_initialize hook. - fn on_initialize(_: BlockNumber) -> Weight { Zero::zero() } - /// Return the weight of the block's on_finalize hook. - fn on_finalize(_: BlockNumber) -> Weight { Zero::zero() } -} - /// Indicates if dispatch function should pay fees or not. /// If set to false, the block resource limits are applied, yet no fee is deducted. pub trait PaysFee { @@ -86,34 +75,6 @@ pub trait PaysFee { } } -/// Maybe I can do something to remove the duplicate code here. -#[impl_for_tuples(30)] -impl WeighBlock for SingleModule { - fn on_runtime_upgrade() -> Weight { - let mut accumulated_weight: Weight = Zero::zero(); - for_tuples!( - #( accumulated_weight = accumulated_weight.saturating_add(SingleModule::on_runtime_upgrade()); )* - ); - accumulated_weight - } - - fn on_initialize(n: BlockNumber) -> Weight { - let mut accumulated_weight: Weight = Zero::zero(); - for_tuples!( - #( accumulated_weight = accumulated_weight.saturating_add(SingleModule::on_initialize(n)); )* - ); - accumulated_weight - } - - fn on_finalize(n: BlockNumber) -> Weight { - let mut accumulated_weight: Weight = Zero::zero(); - for_tuples!( - #( accumulated_weight = accumulated_weight.saturating_add(SingleModule::on_finalize(n)); )* - ); - accumulated_weight - } -} - /// A generalized group of dispatch types. 
This is only distinguishing normal, user-triggered transactions /// (`Normal`) and anything beyond which serves a higher purpose to the system (`Operational`). #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] @@ -124,6 +85,19 @@ pub enum DispatchClass { Normal, /// An operational dispatch. Operational, + /// A mandatory dispatch. These kinds of dispatch are always included regardless of their + /// weight, therefore it is critical that they are separately validated to ensure that a + /// malicious validator cannot craft a valid but impossibly heavy block. Usually this just means + /// ensuring that the extrinsic can only be included once and that it is always very light. + /// + /// Do *NOT* use it for extrinsics that can be heavy. + /// + /// The only real use case for this is inherent extrinsics that are required to execute in a + /// block for the block to be valid, and it solves the issue in the case that the block + /// initialization is sufficiently heavy to mean that those inherents do not fit into the + /// block. Essentially, we assume that in these exceptional circumstances, it is better to + /// allow an overweight block to be created than to not allow any block at all to be created. + Mandatory, } impl Default for DispatchClass { @@ -141,6 +115,8 @@ impl From for DispatchClass { SimpleDispatchInfo::FixedNormal(_) => DispatchClass::Normal, SimpleDispatchInfo::MaxNormal => DispatchClass::Normal, SimpleDispatchInfo::InsecureFreeNormal => DispatchClass::Normal, + + SimpleDispatchInfo::FixedMandatory(_) => DispatchClass::Mandatory, } } } @@ -156,6 +132,64 @@ pub struct DispatchInfo { pub pays_fee: bool, } +/// Weight information that is only available post dispatch. +#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] +pub struct PostDispatchInfo { + /// Actual weight consumed by a call or `None` which stands for the worst case static weight. + pub actual_weight: Option, +} + +impl From> for PostDispatchInfo { + fn from(actual_weight: Option) -> Self { + Self { + actual_weight, + } + } +} + +impl From<()> for PostDispatchInfo { + fn from(_: ()) -> Self { + Self { + actual_weight: None, + } + } +} + +impl sp_runtime::traits::Printable for PostDispatchInfo { + fn print(&self) { + "actual_weight=".print(); + match self.actual_weight { + Some(weight) => weight.print(), + None => "max-weight".print(), + } + } +} + +/// Allows easy conversion from `DispatchError` to `DispatchErrorWithPostInfo` for dispatchables +/// that want to return a custom a posteriori weight on error. +pub trait WithPostDispatchInfo { + /// Call this on your modules custom errors type in order to return a custom weight on error. + /// + /// # Example + /// + /// ```ignore + /// let who = ensure_signed(origin).map_err(|e| e.with_weight(100))?; + /// ensure!(who == me, Error::::NotMe.with_weight(200_000)); + /// ``` + fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; +} + +impl WithPostDispatchInfo for T where + T: Into +{ + fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { actual_weight: Some(actual_weight) }, + error: self.into(), + } + } +} + /// A `Dispatchable` function (aka transaction) that can carry some static information along with /// it, using the `#[weight]` attribute. pub trait GetDispatchInfo { @@ -193,6 +227,11 @@ pub enum SimpleDispatchInfo { FixedOperational(Weight), /// An operational dispatch with the maximum weight. 
MaxOperational, + /// A mandatory dispatch with fixed weight. + /// + /// NOTE: Signed transactions may not (directly) dispatch this kind of a call, so the other + /// attributes concerning transactability (e.g. priority, fee paying) are moot. + FixedMandatory(Weight), } impl WeighData for SimpleDispatchInfo { @@ -201,9 +240,9 @@ impl WeighData for SimpleDispatchInfo { SimpleDispatchInfo::FixedNormal(w) => *w, SimpleDispatchInfo::MaxNormal => Bounded::max_value(), SimpleDispatchInfo::InsecureFreeNormal => Bounded::min_value(), - SimpleDispatchInfo::FixedOperational(w) => *w, SimpleDispatchInfo::MaxOperational => Bounded::max_value(), + SimpleDispatchInfo::FixedMandatory(w) => *w, } } } @@ -220,9 +259,9 @@ impl PaysFee for SimpleDispatchInfo { SimpleDispatchInfo::FixedNormal(_) => true, SimpleDispatchInfo::MaxNormal => true, SimpleDispatchInfo::InsecureFreeNormal => true, - SimpleDispatchInfo::FixedOperational(_) => true, SimpleDispatchInfo::MaxOperational => true, + SimpleDispatchInfo::FixedMandatory(_) => true, } } } diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 0a5595914b07b28ea416767753349ec253293dae..773523579b0b4ce8d580e5e80aad796591947fdf 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -10,13 +10,13 @@ repository = "https://github.com/paritytech/substrate/" [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-io ={ path = "../../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} -sp-state-machine = { version = "0.8.0-alpha.2", optional = true, path = "../../../primitives/state-machine" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../" } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/inherents" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-io ={ path = "../../../primitives/io", default-features = false , version = "2.0.0-alpha.5"} +sp-state-machine = { version = "0.8.0-alpha.5", optional = true, path = "../../../primitives/state-machine" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../" } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/inherents" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../../primitives/core" } trybuild = "1.0.17" pretty_assertions = "0.6.1" @@ -32,3 +32,6 @@ std = [ "sp-runtime/std", "sp-state-machine", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/support/test/tests/decl_error.rs b/frame/support/test/tests/decl_error.rs index 39d7dbdb9647857ce1d054d53acac90fda42d085..4191e79f2417ca59d118e0265094fa4e9a5b8782 100644 --- a/frame/support/test/tests/decl_error.rs +++ b/frame/support/test/tests/decl_error.rs @@ -32,6 +32,7 @@ mod module1 { pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: ::Origin { + #[weight = 
frame_support::weights::SimpleDispatchInfo::default()] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { Err(Error::::Something.into()) } @@ -58,6 +59,7 @@ mod module2 { pub struct Module for enum Call where origin: ::Origin { + #[weight = frame_support::weights::SimpleDispatchInfo::default()] pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { Err(Error::::Something.into()) } diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index f5e3892d5f72405fb742a19ddd81b431a9cde386..ea9b09f9d7bc2c9001b70232facbb69d43270cc0 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -37,10 +37,10 @@ mod tests { // non-getters: pub / $default /// Hello, this is doc! - U32 : Option; - pub PUBU32 : Option; - U32MYDEF : Option; - pub PUBU32MYDEF : Option; + U32: Option; + pub PUBU32: Option; + U32MYDEF: Option; + pub PUBU32MYDEF: Option; // getters: pub / $default // we need at least one type which uses T, otherwise GenesisConfig will complain. @@ -59,31 +59,23 @@ mod tests { GetOptU32WithBuilderNone get(fn opt_u32_with_builder_none) build(|_| None): Option; // map non-getters: pub / $default - MAPU32 : map hasher(blake2_256) u32 => Option; - pub PUBMAPU32 : map hasher(blake2_256) u32 => Option; - MAPU32MYDEF : map hasher(blake2_256) u32 => Option; - pub PUBMAPU32MYDEF : map hasher(blake2_256) u32 => Option; + MAPU32: map hasher(blake2_128_concat) u32 => Option; + pub PUBMAPU32: map hasher(blake2_128_concat) u32 => Option; + MAPU32MYDEF: map hasher(blake2_128_concat) u32 => Option; + pub PUBMAPU32MYDEF: map hasher(blake2_128_concat) u32 => Option; // map getters: pub / $default - GETMAPU32 get(fn map_u32_getter): map hasher(blake2_256) u32 => String; - pub PUBGETMAPU32 get(fn pub_map_u32_getter): map hasher(blake2_256) u32 => String; + GETMAPU32 get(fn map_u32_getter): map hasher(blake2_128_concat) u32 => String; + pub PUBGETMAPU32 get(fn pub_map_u32_getter): map hasher(blake2_128_concat) u32 => String; GETMAPU32MYDEF get(fn map_u32_getter_mydef): - map hasher(blake2_256) u32 => String = "map".into(); + map hasher(blake2_128_concat) u32 => String = "map".into(); pub PUBGETMAPU32MYDEF get(fn pub_map_u32_getter_mydef): - map hasher(blake2_256) u32 => String = "pubmap".into(); - - // linked map - LINKEDMAPU32 : linked_map hasher(blake2_256) u32 => Option; - pub PUBLINKEDMAPU32MYDEF : linked_map hasher(blake2_256) u32 => Option; - GETLINKEDMAPU32 get(fn linked_map_u32_getter): - linked_map hasher(blake2_256) u32 => String; - pub PUBGETLINKEDMAPU32MYDEF get(fn pub_linked_map_u32_getter_mydef): - linked_map hasher(blake2_256) u32 => String = "pubmap".into(); + map hasher(blake2_128_concat) u32 => String = "pubmap".into(); COMPLEXTYPE1: ::std::vec::Vec<::Origin>; - COMPLEXTYPE2: (Vec)>>, u32); - COMPLEXTYPE3: [u32;25]; + COMPLEXTYPE2: (Vec)>>, u32); + COMPLEXTYPE3: [u32; 25]; } add_extra_genesis { build(|_| {}); @@ -249,10 +241,10 @@ mod tests { name: DecodeDifferent::Encode("MAPU32"), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("String"), - is_linked: false, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructMAPU32(PhantomData::)) @@ -263,10 +255,10 @@ mod tests { name: DecodeDifferent::Encode("PUBMAPU32"), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - 
hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("String"), - is_linked: false, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructPUBMAPU32(PhantomData::)) @@ -277,10 +269,10 @@ mod tests { name: DecodeDifferent::Encode("MAPU32MYDEF"), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("String"), - is_linked: false, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructMAPU32MYDEF(PhantomData::)) @@ -291,10 +283,10 @@ mod tests { name: DecodeDifferent::Encode("PUBMAPU32MYDEF"), modifier: StorageEntryModifier::Optional, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("String"), - is_linked: false, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructPUBMAPU32MYDEF(PhantomData::)) @@ -305,10 +297,10 @@ mod tests { name: DecodeDifferent::Encode("GETMAPU32"), modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("String"), - is_linked: false, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructGETMAPU32(PhantomData::)) @@ -319,10 +311,10 @@ mod tests { name: DecodeDifferent::Encode("PUBGETMAPU32"), modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("String"), - is_linked: false, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructPUBGETMAPU32(PhantomData::)) @@ -333,10 +325,10 @@ mod tests { name: DecodeDifferent::Encode("GETMAPU32MYDEF"), modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("String"), - is_linked: false, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructGETMAPU32MYDEF(PhantomData::)) @@ -347,72 +339,16 @@ mod tests { name: DecodeDifferent::Encode("PUBGETMAPU32MYDEF"), modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Blake2_128Concat, key: DecodeDifferent::Encode("u32"), value: DecodeDifferent::Encode("String"), - is_linked: false, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter(&__GetByteStructPUBGETMAPU32MYDEF(PhantomData::)) ), documentation: DecodeDifferent::Encode(&[]), }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("LINKEDMAPU32"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: true, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructLINKEDMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: 
DecodeDifferent::Encode("PUBLINKEDMAPU32MYDEF"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: true, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBLINKEDMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETLINKEDMAPU32"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: true, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETLINKEDMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETLINKEDMAPU32MYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - is_linked: true, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETLINKEDMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, StorageEntryMetadata { name: DecodeDifferent::Encode("COMPLEXTYPE1"), modifier: StorageEntryModifier::Default, @@ -562,17 +498,13 @@ mod test_append_and_len { JustVecWithDefault: Vec = vec![6, 9]; OptionVec: Option>; - MapVec: map hasher(blake2_256) u32 => Vec; - MapVecWithDefault: map hasher(blake2_256) u32 => Vec = vec![6, 9]; - OptionMapVec: map hasher(blake2_256) u32 => Option>; - - DoubleMapVec: double_map hasher(blake2_256) u32, hasher(blake2_256) u32 => Vec; - DoubleMapVecWithDefault: double_map hasher(blake2_256) u32, hasher(blake2_256) u32 => Vec = vec![6, 9]; - OptionDoubleMapVec: double_map hasher(blake2_256) u32, hasher(blake2_256) u32 => Option>; + MapVec: map hasher(blake2_128_concat) u32 => Vec; + MapVecWithDefault: map hasher(blake2_128_concat) u32 => Vec = vec![6, 9]; + OptionMapVec: map hasher(blake2_128_concat) u32 => Option>; - LinkedMapVec: linked_map hasher(blake2_256) u32 => Vec; - LinkedMapVecWithDefault: linked_map hasher(blake2_256) u32 => Vec = vec![6, 9]; - OptionLinkedMapVec: linked_map hasher(blake2_256) u32 => Option>; + DoubleMapVec: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Vec; + DoubleMapVecWithDefault: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Vec = vec![6, 9]; + OptionDoubleMapVec: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Option>; } } @@ -644,13 +576,11 @@ mod test_append_and_len { JustVec::put(&vec![1, 2, 3, 4]); OptionVec::put(&vec![1, 2, 3, 4, 5]); MapVec::insert(1, &vec![1, 2, 3, 4, 5, 6]); - LinkedMapVec::insert(2, &vec![1, 2, 3]); DoubleMapVec::insert(0, 1, &vec![1, 2]); assert_eq!(JustVec::decode_len().unwrap(), 4); assert_eq!(OptionVec::decode_len().unwrap(), 5); assert_eq!(MapVec::decode_len(1).unwrap(), 6); - assert_eq!(LinkedMapVec::decode_len(2).unwrap(), 3); assert_eq!(DoubleMapVec::decode_len(0, 1).unwrap(), 2); }); } @@ -678,16 +608,6 @@ mod test_append_and_len { assert_eq!(OptionMapVec::get(0), None); assert_eq!(OptionMapVec::decode_len(0), Ok(0)); - // linked map - assert_eq!(LinkedMapVec::get(0), vec![]); - assert_eq!(LinkedMapVec::decode_len(0), Ok(0)); - - assert_eq!(LinkedMapVecWithDefault::get(0), 
vec![6, 9]); - assert_eq!(LinkedMapVecWithDefault::decode_len(0), Ok(2)); - - assert_eq!(OptionLinkedMapVec::get(0), None); - assert_eq!(OptionLinkedMapVec::decode_len(0), Ok(0)); - // Double map assert_eq!(DoubleMapVec::get(0, 0), vec![]); assert_eq!(DoubleMapVec::decode_len(0, 1), Ok(0)); diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index e604038f4ea30031129d4f118dcffe4b0c5e7686..ae23c5a64c200af647f0cd4387495742026ccc6c 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -16,8 +16,8 @@ use frame_support::storage::unhashed; use codec::Encode; -use frame_support::{StorageDoubleMap, StorageLinkedMap, StorageMap, StorageValue, StoragePrefixedMap}; -use sp_io::{TestExternalities, hashing::{twox_128, blake2_128, blake2_256}}; +use frame_support::{StorageDoubleMap, StorageMap, StorageValue, StoragePrefixedMap}; +use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; mod no_instance { use codec::{Encode, Decode, EncodeLike}; @@ -35,18 +35,15 @@ mod no_instance { trait Store for Module as FinalKeysNone { pub Value config(value): u32; - pub Map: map hasher(blake2_256) u32 => u32; - pub Map2: map hasher(twox_128) u32 => u32; + pub Map: map hasher(blake2_128_concat) u32 => u32; + pub Map2: map hasher(twox_64_concat) u32 => u32; - pub LinkedMap: linked_map hasher(blake2_256) u32 => u32; - pub LinkedMap2: linked_map hasher(twox_128) u32 => u32; - - pub DoubleMap: double_map hasher(blake2_256) u32, hasher(blake2_256) u32 => u32; - pub DoubleMap2: double_map hasher(twox_128) u32, hasher(blake2_128) u32 => u32; + pub DoubleMap: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => u32; + pub DoubleMap2: double_map hasher(twox_64_concat) u32, hasher(twox_64_concat) u32 => u32; pub TestGenericValue get(fn test_generic_value) config(): Option; pub TestGenericDoubleMap get(fn foo2) config(test_generic_double_map): - double_map hasher(blake2_256) u32, hasher(blake2_256) T::BlockNumber => Option; + double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Option; } } } @@ -65,18 +62,15 @@ mod instance { { pub Value config(value): u32; - pub Map: map hasher(blake2_256) u32 => u32; - pub Map2: map hasher(twox_128) u32 => u32; - - pub LinkedMap: linked_map hasher(blake2_256) u32 => u32; - pub LinkedMap2: linked_map hasher(twox_128) u32 => u32; + pub Map: map hasher(blake2_128_concat) u32 => u32; + pub Map2: map hasher(twox_64_concat) u32 => u32; - pub DoubleMap: double_map hasher(blake2_256) u32, hasher(blake2_256) u32 => u32; - pub DoubleMap2: double_map hasher(twox_128) u32, hasher(blake2_128) u32 => u32; + pub DoubleMap: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => u32; + pub DoubleMap2: double_map hasher(twox_64_concat) u32, hasher(twox_64_concat) u32 => u32; pub TestGenericValue get(fn test_generic_value) config(): Option; pub TestGenericDoubleMap get(fn foo2) config(test_generic_double_map): - double_map hasher(blake2_256) u32, hasher(blake2_256) T::BlockNumber => Option; + double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Option; } add_extra_genesis { // See `decl_storage` limitation. 
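The hasher changes throughout these tests are not cosmetic: `blake2_256` and `twox_128` discard the key, whereas the `_concat` hashers (and `identity`) keep the SCALE-encoded key recoverable from the trailing bytes of the storage key, which is what `ReversibleStorageHasher` and the new map iterators rely on. A minimal sketch of that round trip, reusing the `blake2_128_concat` test helper added in the hunk just below; the literal key is illustrative.

    use codec::{Encode, Decode};

    fn concat_hasher_roundtrip() {
        // blake2_128_concat(x) = blake2_128(x) ++ x, so dropping the 16-byte hash
        // prefix leaves the SCALE encoding of the key, ready to be decoded again.
        let hashed = blake2_128_concat(&1u32.encode());
        let mut key_material = &hashed[16..];
        assert_eq!(u32::decode(&mut key_material).unwrap(), 1u32);
    }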
@@ -85,6 +79,18 @@ mod instance { } } +fn twox_64_concat(d: &[u8]) -> Vec { + let mut v = twox_64(d).to_vec(); + v.extend_from_slice(d); + v +} + +fn blake2_128_concat(d: &[u8]) -> Vec { + let mut v = blake2_128(d).to_vec(); + v.extend_from_slice(d); + v +} + #[test] fn final_keys_no_instance() { TestExternalities::default().execute_with(|| { @@ -94,41 +100,27 @@ fn final_keys_no_instance() { no_instance::Map::insert(1, 2); let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"Map")].concat(); - k.extend(1u32.using_encoded(blake2_256).to_vec()); + k.extend(1u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &::final_prefix()); no_instance::Map2::insert(1, 2); let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"Map2")].concat(); - k.extend(1u32.using_encoded(twox_128).to_vec()); + k.extend(1u32.using_encoded(twox_64_concat)); assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &::final_prefix()); - let head = [twox_128(b"FinalKeysNone"), twox_128(b"HeadOfLinkedMap")].concat(); - assert_eq!(unhashed::get::(&head), None); - - no_instance::LinkedMap::insert(1, 2); - let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"LinkedMap")].concat(); - k.extend(1u32.using_encoded(blake2_256).to_vec()); - assert_eq!(unhashed::get::(&k), Some(2u32)); - assert_eq!(unhashed::get::(&head), Some(1u32)); - - no_instance::LinkedMap2::insert(1, 2); - let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"LinkedMap2")].concat(); - k.extend(1u32.using_encoded(twox_128).to_vec()); - assert_eq!(unhashed::get::(&k), Some(2u32)); - no_instance::DoubleMap::insert(&1, &2, &3); let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"DoubleMap")].concat(); - k.extend(1u32.using_encoded(blake2_256).to_vec()); - k.extend(2u32.using_encoded(blake2_256).to_vec()); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u32)); assert_eq!(&k[..32], &::final_prefix()); no_instance::DoubleMap2::insert(&1, &2, &3); let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"DoubleMap2")].concat(); - k.extend(1u32.using_encoded(twox_128).to_vec()); - k.extend(2u32.using_encoded(blake2_128).to_vec()); + k.extend(1u32.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(twox_64_concat)); assert_eq!(unhashed::get::(&k), Some(3u32)); assert_eq!(&k[..32], &::final_prefix()); }); @@ -143,41 +135,27 @@ fn final_keys_default_instance() { >::insert(1, 2); let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"Map")].concat(); - k.extend(1u32.using_encoded(blake2_256).to_vec()); + k.extend(1u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); >::insert(1, 2); let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"Map2")].concat(); - k.extend(1u32.using_encoded(twox_128).to_vec()); + k.extend(1u32.using_encoded(twox_64_concat)); assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); - let head = [twox_128(b"FinalKeysSome"), twox_128(b"HeadOfLinkedMap")].concat(); - assert_eq!(unhashed::get::(&head), None); - - >::insert(1, 2); - let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"LinkedMap")].concat(); - k.extend(1u32.using_encoded(blake2_256).to_vec()); - assert_eq!(unhashed::get::(&k), Some(2u32)); - assert_eq!(unhashed::get::(&head), Some(1u32)); - - >::insert(1, 2); - let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"LinkedMap2")].concat(); - k.extend(1u32.using_encoded(twox_128).to_vec()); - 
assert_eq!(unhashed::get::(&k), Some(2u32)); - >::insert(&1, &2, &3); let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"DoubleMap")].concat(); - k.extend(1u32.using_encoded(blake2_256).to_vec()); - k.extend(2u32.using_encoded(blake2_256).to_vec()); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u32)); assert_eq!(&k[..32], &>::final_prefix()); >::insert(&1, &2, &3); let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"DoubleMap2")].concat(); - k.extend(1u32.using_encoded(twox_128).to_vec()); - k.extend(2u32.using_encoded(blake2_128).to_vec()); + k.extend(1u32.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(twox_64_concat)); assert_eq!(unhashed::get::(&k), Some(3u32)); assert_eq!(&k[..32], &>::final_prefix()); }); @@ -192,41 +170,27 @@ fn final_keys_instance_2() { >::insert(1, 2); let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"Map")].concat(); - k.extend(1u32.using_encoded(blake2_256).to_vec()); + k.extend(1u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); >::insert(1, 2); let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"Map2")].concat(); - k.extend(1u32.using_encoded(twox_128).to_vec()); + k.extend(1u32.using_encoded(twox_64_concat)); assert_eq!(unhashed::get::(&k), Some(2u32)); assert_eq!(&k[..32], &>::final_prefix()); - let head = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"HeadOfLinkedMap")].concat(); - assert_eq!(unhashed::get::(&head), None); - - >::insert(1, 2); - let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"LinkedMap")].concat(); - k.extend(1u32.using_encoded(blake2_256).to_vec()); - assert_eq!(unhashed::get::(&k), Some(2u32)); - assert_eq!(unhashed::get::(&head), Some(1u32)); - - >::insert(1, 2); - let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"LinkedMap2")].concat(); - k.extend(1u32.using_encoded(twox_128).to_vec()); - assert_eq!(unhashed::get::(&k), Some(2u32)); - >::insert(&1, &2, &3); let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"DoubleMap")].concat(); - k.extend(1u32.using_encoded(blake2_256).to_vec()); - k.extend(2u32.using_encoded(blake2_256).to_vec()); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); assert_eq!(unhashed::get::(&k), Some(3u32)); assert_eq!(&k[..32], &>::final_prefix()); >::insert(&1, &2, &3); let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"DoubleMap2")].concat(); - k.extend(1u32.using_encoded(twox_128).to_vec()); - k.extend(2u32.using_encoded(blake2_128).to_vec()); + k.extend(1u32.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(twox_64_concat)); assert_eq!(unhashed::get::(&k), Some(3u32)); assert_eq!(&k[..32], &>::final_prefix()); }); diff --git a/frame/support/test/tests/genesisconfig.rs b/frame/support/test/tests/genesisconfig.rs index 5a5ab9d1dbcdd9fd4db78f7871e42a8fe2ca7f4d..bccffb737476b47fa9b8ebdccfce88139fb9370d 100644 --- a/frame/support/test/tests/genesisconfig.rs +++ b/frame/support/test/tests/genesisconfig.rs @@ -25,7 +25,7 @@ frame_support::decl_module! { frame_support::decl_storage! 
{ trait Store for Module as Test { - pub AppendableDM config(t): double_map hasher(blake2_256) u32, hasher(blake2_256) T::BlockNumber => Vec; + pub AppendableDM config(t): double_map hasher(identity) u32, hasher(identity) T::BlockNumber => Vec; } } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 6fa2806dd348379008ddb2ea5f8ef7b91cd2d3c7..ea5d32fea3b222b754331aff047d65782b671911 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -23,7 +23,7 @@ use frame_support::{ DecodeDifferent, StorageMetadata, StorageEntryModifier, StorageEntryType, DefaultByteGetter, StorageEntryMetadata, StorageHasher, }, - StorageValue, StorageMap, StorageLinkedMap, StorageDoubleMap, + StorageValue, StorageMap, StorageDoubleMap, }; use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier, MakeFatalError}; use sp_core::{H256, sr25519}; @@ -55,6 +55,7 @@ mod module1 { fn deposit_event() = default; + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn one(origin) { system::ensure_root(origin)?; Self::deposit_event(RawEvent::AnotherVariant(3)); @@ -67,8 +68,7 @@ mod module1 { T::BlockNumber: From + std::fmt::Display { pub Value config(value): T::GenericType; - pub Map: map hasher(blake2_256) u32 => u64; - pub LinkedMap: linked_map hasher(blake2_256) u32 => u64; + pub Map: map hasher(identity) u32 => u64; } add_extra_genesis { @@ -136,9 +136,8 @@ mod module2 { frame_support::decl_storage! { trait Store for Module, I: Instance=DefaultInstance> as Module2 { pub Value config(value): T::Amount; - pub Map config(map): map hasher(blake2_256) u64 => u64; - pub LinkedMap config(linked_map): linked_map hasher(blake2_256) u64 => Vec; - pub DoubleMap config(double_map): double_map hasher(blake2_256) u64, hasher(blake2_256) u64 => u64; + pub Map config(map): map hasher(identity) u64 => u64; + pub DoubleMap config(double_map): double_map hasher(identity) u64, hasher(identity) u64 => u64; } } @@ -285,13 +284,11 @@ fn new_test_ext() -> sp_io::TestExternalities { module2: Some(module2::GenesisConfig { value: 4, map: vec![(0, 0)], - linked_map: vec![(0, vec![0])], double_map: vec![(0, 0, 0)], }), module2_Instance1: Some(module2::GenesisConfig { value: 4, map: vec![(0, 0)], - linked_map: vec![(0, vec![0])], double_map: vec![(0, 0, 0)], }), module2_Instance2: None, @@ -314,17 +311,13 @@ fn storage_instance_independence() { module2::Map::::insert(0, 0); module2::Map::::insert(0, 0); module2::Map::::insert(0, 0); - module2::LinkedMap::::insert::<_, Vec>(0, vec![]); - module2::LinkedMap::::insert::<_, Vec>(0, vec![]); - module2::LinkedMap::::insert::<_, Vec>(0, vec![]); - module2::LinkedMap::::insert::<_, Vec>(0, vec![]); module2::DoubleMap::::insert(&0, &0, &0); module2::DoubleMap::::insert(&0, &0, &0); module2::DoubleMap::::insert(&0, &0, &0); module2::DoubleMap::::insert(&0, &0, &0); }); - // 16 storage values + 4 linked_map head. - assert_eq!(storage.top.len(), 16 + 4); + // 12 storage values. 
+ assert_eq!(storage.top.len(), 12); } #[test] @@ -332,7 +325,6 @@ fn storage_with_instance_basic_operation() { new_test_ext().execute_with(|| { type Value = module2::Value; type Map = module2::Map; - type LinkedMap = module2::LinkedMap; type DoubleMap = module2::DoubleMap; assert_eq!(Value::exists(), true); @@ -360,26 +352,6 @@ fn storage_with_instance_basic_operation() { assert_eq!(Map::contains_key(key), false); assert_eq!(Map::get(key), 0); - assert_eq!(LinkedMap::contains_key(0), true); - assert_eq!(LinkedMap::contains_key(key), false); - LinkedMap::insert(key, vec![1]); - assert_eq!(LinkedMap::enumerate().count(), 2); - assert_eq!(LinkedMap::get(key), vec![1]); - assert_eq!(LinkedMap::take(key), vec![1]); - assert_eq!(LinkedMap::enumerate().count(), 1); - assert_eq!(LinkedMap::get(key), vec![]); - LinkedMap::mutate(key, |a| *a=vec![2]); - assert_eq!(LinkedMap::enumerate().count(), 2); - assert_eq!(LinkedMap::get(key), vec![2]); - LinkedMap::remove(key); - assert_eq!(LinkedMap::enumerate().count(), 1); - assert_eq!(LinkedMap::contains_key(key), false); - assert_eq!(LinkedMap::get(key), vec![]); - assert_eq!(LinkedMap::contains_key(key), false); - assert_eq!(LinkedMap::enumerate().count(), 1); - LinkedMap::insert(key, &vec![1]); - assert_eq!(LinkedMap::enumerate().count(), 2); - let key1 = 1; let key2 = 1; assert_eq!(DoubleMap::contains_key(&0, &0), true); @@ -416,10 +388,10 @@ const EXPECTED_METADATA: StorageMetadata = StorageMetadata { name: DecodeDifferent::Encode("Map"), modifier: StorageEntryModifier::Default, ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Identity, key: DecodeDifferent::Encode("u64"), value: DecodeDifferent::Encode("u64"), - is_linked: false, + unused: false, }, default: DecodeDifferent::Encode( DefaultByteGetter( @@ -430,30 +402,12 @@ const EXPECTED_METADATA: StorageMetadata = StorageMetadata { ), documentation: DecodeDifferent::Encode(&[]), }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("LinkedMap"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_256, - key: DecodeDifferent::Encode("u64"), - value: DecodeDifferent::Encode("Vec"), - is_linked: true, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter( - &module2::__GetByteStructLinkedMap( - std::marker::PhantomData::<(Runtime, module2::Instance2)> - ) - ) - ), - documentation: DecodeDifferent::Encode(&[]), - }, StorageEntryMetadata { name: DecodeDifferent::Encode("DoubleMap"), modifier: StorageEntryModifier::Default, ty: StorageEntryType::DoubleMap { - hasher: StorageHasher::Blake2_256, - key2_hasher: StorageHasher::Blake2_256, + hasher: StorageHasher::Identity, + key2_hasher: StorageHasher::Identity, key1: DecodeDifferent::Encode("u64"), key2: DecodeDifferent::Encode("u64"), value: DecodeDifferent::Encode("u64"), diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index 45377669811160a0e50cf7af951945f266649c5f..8d8152a5ad0f35b7b2485acb13d49c56b2075392 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -109,7 +109,7 @@ mod module { } else { vec![] } - }): map hasher(blake2_256) Role => Option>; + }): map hasher(blake2_128_concat) Role => Option>; /// the roles members can enter into pub AvailableRoles get(fn available_roles) build(|config: &GenesisConfig| { @@ -125,11 +125,11 @@ mod module { /// actor accounts associated with a role pub AccountIdsByRole get(fn account_ids_by_role): - map hasher(blake2_256) 
Role => Vec; + map hasher(blake2_128_concat) Role => Vec; /// tokens locked until given block number pub Bondage get(fn bondage): - map hasher(blake2_256) T::AccountId => T::BlockNumber; + map hasher(blake2_128_concat) T::AccountId => T::BlockNumber; /// First step before enter a role is registering intent with a new account/key. /// This is done by sending a role_entry_request() from the new account. diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.rs b/frame/support/test/tests/reserved_keyword/on_initialize.rs index 84feb2d93f36cfc2212f44a7fa646d42501355b8..8eacc836c48685c1516ce129780c71f0ee636ea1 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.rs +++ b/frame/support/test/tests/reserved_keyword/on_initialize.rs @@ -19,6 +19,7 @@ macro_rules! reserved { frame_support::decl_module! { pub struct Module for enum Call where origin: T::Origin { + #[weight = frame_support::weights::SimpleDispatchInfo::default()] fn $reserved(_origin) -> dispatch::DispatchResult { unreachable!() } } } diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.stderr b/frame/support/test/tests/reserved_keyword/on_initialize.stderr index d20a6e11451e7d52f4ded9a810ddc81f0d58bbb8..e899ef5d789426c620a9c92f7190a2ac5fdabd4f 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.stderr +++ b/frame/support/test/tests/reserved_keyword/on_initialize.stderr @@ -1,39 +1,39 @@ error: Invalid call fn name: `on_finalize`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. - --> $DIR/on_initialize.rs:30:1 + --> $DIR/on_initialize.rs:31:1 | -30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) error: Invalid call fn name: `on_initialize`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. - --> $DIR/on_initialize.rs:30:1 + --> $DIR/on_initialize.rs:31:1 | -30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) error: Invalid call fn name: `on_runtime_upgrade`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. 
- --> $DIR/on_initialize.rs:30:1 + --> $DIR/on_initialize.rs:31:1 | -30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) error: Invalid call fn name: `offchain_worker`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. - --> $DIR/on_initialize.rs:30:1 + --> $DIR/on_initialize.rs:31:1 | -30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) -error: `deposit_event` function is reserved and must follow the syntax: `$vis:vis fn deposit_event() = default;` - --> $DIR/on_initialize.rs:30:1 +error: Invalid call fn name: `deposit_event`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. + --> $DIR/on_initialize.rs:31:1 | -30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); +31 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index 4e350be1a90c78b8a18c4da1a77f6d9e4c54f905..78288cff917e233306fe0bee01f5a04d96263f16 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,18 +10,18 @@ description = "FRAME system module" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { path = "../../primitives/io", default-features = false, version = "2.0.0-alpha.2" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/version" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-std = { 
version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { path = "../../primitives/io", default-features = false, version = "2.0.0-alpha.5"} +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-version = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/version" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] criterion = "0.2.11" -sp-externalities = { version = "0.8.0-alpha.2", path = "../../primitives/externalities" } +sp-externalities = { version = "0.8.0-alpha.5", path = "../../primitives/externalities" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } [features] @@ -36,8 +36,11 @@ std = [ "sp-runtime/std", "sp-version/std", ] -runtime-benchmarks = [] +runtime-benchmarks = ["sp-runtime/runtime-benchmarks"] [[bench]] name = "bench" harness = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index 3bcc34698a56efcb82496a076bf9c507acd5d31d..9519427297425504a8fd1b9c769c5c2a6edd899e 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-system-rpc-runtime-api" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,8 +9,8 @@ repository = "https://github.com/paritytech/substrate/" description = "Runtime API definition required by System RPC extensions." [dependencies] -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } [features] default = ["std"] @@ -18,3 +18,6 @@ std = [ "sp-api/std", "codec/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/system/src/accounts.scale b/frame/system/src/accounts.scale new file mode 100644 index 0000000000000000000000000000000000000000..bfd7e6277f20c53ceb1d664235bb6af18ec538ed Binary files /dev/null and b/frame/system/src/accounts.scale differ diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index b8cd11c07f7b3f640f27263b0453254321b8a78b..a38a8854c75c31f37c6eacfc84b28eb887061565 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -68,13 +68,14 @@ //! ### Example - Get extrinsic count and parent hash for the current block //! //! ``` -//! use frame_support::{decl_module, dispatch}; +//! use frame_support::{decl_module, dispatch, weights::SimpleDispatchInfo}; //! use frame_system::{self as system, ensure_signed}; //! //! pub trait Trait: system::Trait {} //! //! decl_module! { //! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = SimpleDispatchInfo::default()] //! pub fn system_module_example(origin) -> dispatch::DispatchResult { //! let _sender = ensure_signed(origin)?; //! 
let _extrinsic_count = >::extrinsic_count(); @@ -98,7 +99,7 @@ use sp_std::marker::PhantomData; use sp_std::fmt::Debug; use sp_version::RuntimeVersion; use sp_runtime::{ - RuntimeDebug, Perbill, DispatchOutcome, DispatchError, + RuntimeDebug, Perbill, DispatchOutcome, DispatchError, DispatchResult, generic::{self, Era}, transaction_validity::{ ValidTransaction, TransactionPriority, TransactionLongevity, TransactionValidityError, @@ -106,8 +107,9 @@ use sp_runtime::{ }, traits::{ self, CheckEqual, AtLeast32Bit, Zero, SignedExtension, Lookup, LookupError, - SimpleBitOps, Hash, Member, MaybeDisplay, EnsureOrigin, BadOrigin, SaturatedConversion, + SimpleBitOps, Hash, Member, MaybeDisplay, BadOrigin, SaturatedConversion, MaybeSerialize, MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded, + Dispatchable, DispatchInfoOf, PostDispatchInfoOf, }, }; @@ -116,9 +118,9 @@ use frame_support::{ decl_module, decl_event, decl_storage, decl_error, storage, Parameter, ensure, debug, traits::{ Contains, Get, ModuleToIndex, OnNewAccount, OnKilledAccount, IsDeadAccount, Happened, - StoredMap + StoredMap, EnsureOrigin, }, - weights::{Weight, DispatchInfo, DispatchClass, SimpleDispatchInfo, FunctionOf}, + weights::{Weight, DispatchInfo, DispatchClass, SimpleDispatchInfo, FunctionOf} }; use codec::{Encode, Decode, FullCodec, EncodeLike}; @@ -145,7 +147,7 @@ pub trait Trait: 'static + Eq + Clone { + Clone; /// The aggregated `Call` type. - type Call: Debug; + type Call: Dispatchable + Debug; /// Account index (aka nonce) type. This stores the number of previous transactions associated /// with a sender account. @@ -235,8 +237,16 @@ pub type KeyValue = (Vec, Vec); pub enum Phase { /// Applying an extrinsic. ApplyExtrinsic(u32), - /// The end. + /// Finalizing the block. Finalization, + /// Initializing the block. + Initialization, +} + +impl Default for Phase { + fn default() -> Self { + Self::Initialization + } } /// Record of an event happening. @@ -337,10 +347,8 @@ impl From for LastRuntimeUpgradeInfo { decl_storage! { trait Store for Module as System { /// The full account information for a particular account ID. - // TODO: should be hasher(twox64_concat) - will need staged migration - // https://github.com/paritytech/substrate/issues/4917 pub Account get(fn account): - map hasher(blake2_256) T::AccountId => AccountInfo; + map hasher(blake2_128_concat) T::AccountId => AccountInfo; /// Total extrinsics count for the current block. ExtrinsicCount: Option; @@ -352,16 +360,14 @@ decl_storage! { AllExtrinsicsLen: Option; /// Map of block numbers to block hashes. - // TODO: should be hasher(twox64_concat) - will need one-off migration - // https://github.com/paritytech/substrate/issues/4917 pub BlockHash get(fn block_hash) build(|_| vec![(T::BlockNumber::zero(), hash69())]): - map hasher(blake2_256) T::BlockNumber => T::Hash; + map hasher(twox_64_concat) T::BlockNumber => T::Hash; /// Extrinsics data for the current block (maps an extrinsic's index to its data). ExtrinsicData get(fn extrinsic_data): map hasher(twox_64_concat) u32 => Vec; /// The current block number being processed. Set by `execute_block`. - Number get(fn block_number) build(|_| 1.into()): T::BlockNumber; + Number get(fn block_number): T::BlockNumber; /// Hash of the previous block. ParentHash get(fn parent_hash) build(|_| hash69()): T::Hash; @@ -393,10 +399,13 @@ decl_storage! 
{ /// The value has the type `(T::BlockNumber, EventIndex)` because if we used only just /// the `EventIndex` then in case if the topic has the same contents on the next block /// no notification will be triggered thus the event might be lost. - EventTopics get(fn event_topics): map hasher(blake2_256) T::Hash => Vec<(T::BlockNumber, EventIndex)>; + EventTopics get(fn event_topics): map hasher(blake2_128_concat) T::Hash => Vec<(T::BlockNumber, EventIndex)>; /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. pub LastRuntimeUpgrade build(|_| Some(LastRuntimeUpgradeInfo::from(T::Version::get()))): Option; + + /// The execution phase of the block. + ExecutionPhase: Option; } add_extra_genesis { config(changes_trie_config): Option; @@ -452,7 +461,7 @@ decl_error! { /// Suicide called when the account has non-default composite data. NonDefaultComposite, /// There is a non-zero reference count preventing the account from being purged. - NonZeroRefCount + NonZeroRefCount, } } @@ -488,20 +497,7 @@ decl_module! { /// Set the new runtime code. #[weight = SimpleDispatchInfo::FixedOperational(200_000)] pub fn set_code(origin, code: Vec) { - ensure_root(origin)?; - - let current_version = T::Version::get(); - let new_version = sp_io::misc::runtime_version(&code) - .and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok()) - .ok_or_else(|| Error::::FailedToExtractRuntimeVersion)?; - - if new_version.spec_name != current_version.spec_name { - Err(Error::::InvalidSpecName)? - } - - if new_version.spec_version <= current_version.spec_version { - Err(Error::::SpecVersionNeedsToIncrease)? - } + Self::can_set_code(origin, &code)?; storage::unhashed::put_raw(well_known_keys::CODE, &code); Self::deposit_event(RawEvent::CodeUpdated); @@ -568,13 +564,6 @@ decl_module! { ensure!(account.data == T::AccountData::default(), Error::::NonDefaultComposite); Account::::remove(who); } - - fn on_runtime_upgrade() { - // Remove the old `RuntimeUpgraded` storage entry. 
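// ---------------------------------------------------------------------------
// Editorial sketch (not part of the patch): `set_code` now only delegates to
// `can_set_code` before writing the new code, so the same checks can be run
// without touching `:code:` at all. A minimal illustrative wrapper, assuming
// this branch's `frame_system` API; `validate_upgrade` is a hypothetical name.

fn validate_upgrade<T: frame_system::Trait>(
	origin: T::Origin,
	code: &[u8],
) -> Result<(), sp_runtime::DispatchError> {
	// Performs the root-origin check plus the spec-name / spec-version checks,
	// but leaves the currently stored runtime code untouched.
	frame_system::Module::<T>::can_set_code(origin, code)
}
// ---------------------------------------------------------------------------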
- let mut runtime_upgraded_key = sp_io::hashing::twox_128(b"System").to_vec(); - runtime_upgraded_key.extend(&sp_io::hashing::twox_128(b"RuntimeUpgraded")); - sp_io::storage::clear(&runtime_upgraded_key); - } } } @@ -590,12 +579,17 @@ impl< r => Err(O::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Root) + } } pub struct EnsureSigned(sp_std::marker::PhantomData); impl< O: Into, O>> + From>, - AccountId, + AccountId: Default, > EnsureOrigin for EnsureSigned { type Success = AccountId; fn try_origin(o: O) -> Result { @@ -604,13 +598,18 @@ impl< r => Err(O::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Signed(Default::default())) + } } pub struct EnsureSignedBy(sp_std::marker::PhantomData<(Who, AccountId)>); impl< O: Into, O>> + From>, Who: Contains, - AccountId: PartialEq + Clone + Ord, + AccountId: PartialEq + Clone + Ord + Default, > EnsureOrigin for EnsureSignedBy { type Success = AccountId; fn try_origin(o: O) -> Result { @@ -619,6 +618,16 @@ impl< r => Err(O::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + let members = Who::sorted_members(); + let first_member = match members.get(0) { + Some(account) => account.clone(), + None => Default::default(), + }; + O::from(RawOrigin::Signed(first_member.clone())) + } } pub struct EnsureNone(sp_std::marker::PhantomData); @@ -633,6 +642,11 @@ impl< r => Err(O::from(r)), }) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::None) + } } pub struct EnsureNever(sp_std::marker::PhantomData); @@ -641,6 +655,11 @@ impl EnsureOrigin for EnsureNever { fn try_origin(o: O) -> Result { Err(o) } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + unimplemented!() + } } /// Ensure that the origin `o` represents a signed extrinsic (i.e. transaction). @@ -734,8 +753,11 @@ impl Module { /// This will update storage entries that correspond to the specified topics. /// It is expected that light-clients could subscribe to this topics. pub fn deposit_event_indexed(topics: &[T::Hash], event: T::Event) { - let extrinsic_index = Self::extrinsic_index(); - let phase = extrinsic_index.map_or(Phase::Finalization, |c| Phase::ApplyExtrinsic(c)); + let block_number = Self::block_number(); + // Don't populate events on genesis. + if block_number.is_zero() { return } + + let phase = ExecutionPhase::get().unwrap_or_default(); let event = EventRecord { phase, event, @@ -767,10 +789,9 @@ impl Module { return; } - let block_no = Self::block_number(); for topic in topics { // The same applies here. - if >::append(topic, &[(block_no, event_idx)]).is_err() { + if >::append(topic, &[(block_number, event_idx)]).is_err() { return; } } @@ -827,6 +848,7 @@ impl Module { kind: InitKind, ) { // populate environment + ExecutionPhase::put(Phase::Initialization); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); >::put(number); >::put(digest); @@ -843,6 +865,7 @@ impl Module { /// Remove temporary "environment" entries in storage. 
pub fn finalize() -> T::Header { + ExecutionPhase::kill(); ExtrinsicCount::kill(); AllExtrinsicsWeight::kill(); AllExtrinsicsLen::kill(); @@ -973,6 +996,7 @@ impl Module { let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32; storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &next_extrinsic_index); + ExecutionPhase::put(Phase::ApplyExtrinsic(next_extrinsic_index)); } /// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block @@ -981,6 +1005,13 @@ impl Module { let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX) .unwrap_or_default(); ExtrinsicCount::put(extrinsic_index); + ExecutionPhase::put(Phase::Finalization); + } + + /// To be called immediately after finishing the initialization of the block + /// (e.g., called `on_initialize` for all modules). + pub fn note_finished_initialize() { + ExecutionPhase::put(Phase::ApplyExtrinsic(0)) } /// Remove all extrinsic data and save the extrinsics trie root. @@ -1020,6 +1051,32 @@ impl Module { Module::::on_killed_account(who.clone()); } } + + /// Determine whether or not it is possible to update the code. + /// + /// This function has no side effects and is idempotent, but is fairly + /// heavy. It is automatically called by `set_code`; in most cases, + /// a direct call to `set_code` is preferable. It is useful to call + /// `can_set_code` when it is desirable to perform the appropriate + /// runtime checks without actually changing the code yet. + pub fn can_set_code(origin: T::Origin, code: &[u8]) -> Result<(), sp_runtime::DispatchError> { + ensure_root(origin)?; + + let current_version = T::Version::get(); + let new_version = sp_io::misc::runtime_version(&code) + .and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok()) + .ok_or_else(|| Error::::FailedToExtractRuntimeVersion)?; + + if new_version.spec_name != current_version.spec_name { + Err(Error::::InvalidSpecName)? + } + + if new_version.spec_version <= current_version.spec_version { + Err(Error::::SpecVersionNeedsToIncrease)? + } + + Ok(()) + } } /// Event handler which calls on_created_account when it happens. @@ -1111,13 +1168,16 @@ pub fn split_inner(option: Option, splitter: impl FnOnce(T) -> (R, S #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct CheckWeight(PhantomData); -impl CheckWeight { +impl CheckWeight where + T::Call: Dispatchable +{ /// Get the quota ratio of each dispatch class type. This indicates that all operational /// dispatches can use the full capacity of any resource, while user-triggered ones can consume /// a portion. fn get_dispatch_limit_ratio(class: DispatchClass) -> Perbill { match class { - DispatchClass::Operational => ::one(), + DispatchClass::Operational | DispatchClass::Mandatory + => ::one(), DispatchClass::Normal => T::AvailableBlockRatio::get(), } } @@ -1126,14 +1186,14 @@ impl CheckWeight { /// /// Upon successes, it returns the new block weight as a `Result`. 
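// ---------------------------------------------------------------------------
// Editorial sketch (not part of the patch): the event-phase rule introduced
// above, reduced to plain Rust. Events deposited while the block number is
// still zero (genesis) are dropped; otherwise they are tagged with whatever
// `ExecutionPhase` currently holds, defaulting to `Initialization`. The local
// `Phase` mirrors the pallet's enum; `phase_for_event` is an illustrative
// helper only, the pallet itself reads the storage item directly.

#[derive(Clone, Debug, PartialEq)]
enum Phase {
	ApplyExtrinsic(u32),
	Finalization,
	Initialization,
}

impl Default for Phase {
	fn default() -> Self { Phase::Initialization }
}

fn phase_for_event(block_number: u64, execution_phase: Option<Phase>) -> Option<Phase> {
	// Don't populate events on genesis.
	if block_number == 0 {
		return None;
	}
	Some(execution_phase.unwrap_or_default())
}

// phase_for_event(0, None) == None
// phase_for_event(1, None) == Some(Phase::Initialization)
// phase_for_event(1, Some(Phase::ApplyExtrinsic(2))) == Some(Phase::ApplyExtrinsic(2))
// ---------------------------------------------------------------------------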
fn check_weight( - info: ::DispatchInfo, + info: &DispatchInfoOf, ) -> Result { let current_weight = Module::::all_extrinsics_weight(); let maximum_weight = T::MaximumBlockWeight::get(); let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_weight; let added_weight = info.weight.min(limit); let next_weight = current_weight.saturating_add(added_weight); - if next_weight > limit { + if next_weight > limit && info.class != DispatchClass::Mandatory { Err(InvalidTransaction::ExhaustsResources.into()) } else { Ok(next_weight) @@ -1144,7 +1204,7 @@ impl CheckWeight { /// /// Upon successes, it returns the new block length as a `Result`. fn check_block_length( - info: ::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> Result { let current_len = Module::::all_extrinsics_len(); @@ -1160,10 +1220,12 @@ impl CheckWeight { } /// get the priority of an extrinsic denoted by `info`. - fn get_priority(info: ::DispatchInfo) -> TransactionPriority { + fn get_priority(info: &DispatchInfoOf) -> TransactionPriority { match info.class { DispatchClass::Normal => info.weight.into(), - DispatchClass::Operational => Bounded::max_value() + DispatchClass::Operational => Bounded::max_value(), + // Mandatory extrinsics are only for inherents; never transactions. + DispatchClass::Mandatory => Bounded::min_value(), } } @@ -1176,7 +1238,7 @@ impl CheckWeight { /// /// It checks and notes the new weight and length. fn do_pre_dispatch( - info: ::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> Result<(), TransactionValidityError> { let next_len = Self::check_block_length(info, len)?; @@ -1190,7 +1252,7 @@ impl CheckWeight { /// /// It only checks that the block weight and length limit will not exceed. fn do_validate( - info: ::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { // ignore the next weight and length. If they return `Ok`, then it is below the limit. @@ -1201,11 +1263,12 @@ impl CheckWeight { } } -impl SignedExtension for CheckWeight { +impl SignedExtension for CheckWeight where + T::Call: Dispatchable +{ type AccountId = T::AccountId; type Call = T::Call; type AdditionalSigned = (); - type DispatchInfo = DispatchInfo; type Pre = (); const IDENTIFIER: &'static str = "CheckWeight"; @@ -1215,9 +1278,12 @@ impl SignedExtension for CheckWeight { self, _who: &Self::AccountId, _call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> Result<(), TransactionValidityError> { + if info.class == DispatchClass::Mandatory { + Err(InvalidTransaction::MandatoryDispatch)? + } Self::do_pre_dispatch(info, len) } @@ -1225,15 +1291,18 @@ impl SignedExtension for CheckWeight { &self, _who: &Self::AccountId, _call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { + if info.class == DispatchClass::Mandatory { + Err(InvalidTransaction::MandatoryDispatch)? 
+ } Self::do_validate(info, len) } fn pre_dispatch_unsigned( _call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> Result<(), TransactionValidityError> { Self::do_pre_dispatch(info, len) @@ -1241,11 +1310,27 @@ impl SignedExtension for CheckWeight { fn validate_unsigned( _call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { Self::do_validate(info, len) } + + fn post_dispatch( + _pre: Self::Pre, + info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + // Since mandatory dispatched do not get validated for being overweight, we are sensitive + // to them actually being useful. Block producers are thus not allowed to include mandatory + // extrinsics that result in error. + if info.class == DispatchClass::Mandatory && result.is_err() { + Err(InvalidTransaction::BadMandatory)? + } + Ok(()) + } } impl Debug for CheckWeight { @@ -1283,11 +1368,12 @@ impl Debug for CheckNonce { } } -impl SignedExtension for CheckNonce { +impl SignedExtension for CheckNonce where + T::Call: Dispatchable +{ type AccountId = T::AccountId; type Call = T::Call; type AdditionalSigned = (); - type DispatchInfo = DispatchInfo; type Pre = (); const IDENTIFIER: &'static str = "CheckNonce"; @@ -1297,7 +1383,7 @@ impl SignedExtension for CheckNonce { self, who: &Self::AccountId, _call: &Self::Call, - _info: Self::DispatchInfo, + _info: &DispatchInfoOf, _len: usize, ) -> Result<(), TransactionValidityError> { let mut account = Account::::get(who); @@ -1319,7 +1405,7 @@ impl SignedExtension for CheckNonce { &self, who: &Self::AccountId, _call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { // check index @@ -1378,7 +1464,6 @@ impl SignedExtension for CheckEra { type AccountId = T::AccountId; type Call = T::Call; type AdditionalSigned = T::Hash; - type DispatchInfo = DispatchInfo; type Pre = (); const IDENTIFIER: &'static str = "CheckEra"; @@ -1386,7 +1471,7 @@ impl SignedExtension for CheckEra { &self, _who: &Self::AccountId, _call: &Self::Call, - _info: Self::DispatchInfo, + _info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { let current_u64 = >::block_number().saturated_into::(); @@ -1435,7 +1520,6 @@ impl SignedExtension for CheckGenesis { type AccountId = T::AccountId; type Call = ::Call; type AdditionalSigned = T::Hash; - type DispatchInfo = DispatchInfo; type Pre = (); const IDENTIFIER: &'static str = "CheckGenesis"; @@ -1471,7 +1555,6 @@ impl SignedExtension for CheckVersion { type AccountId = T::AccountId; type Call = ::Call; type AdditionalSigned = u32; - type DispatchInfo = DispatchInfo; type Pre = (); const IDENTIFIER: &'static str = "CheckVersion"; @@ -1535,9 +1618,22 @@ mod tests { fn on_killed_account(who: &u64) { KILLED.with(|r| r.borrow_mut().push(*who)) } } + #[derive(Debug)] + pub struct Call {} + impl Dispatchable for Call { + type Origin = (); + type Trait = (); + type Info = DispatchInfo; + type PostInfo = (); + fn dispatch(self, _origin: Self::Origin) + -> sp_runtime::DispatchResultWithInfo { + panic!("Do not use dummy implementation for dispatch."); + } + } + impl Trait for Test { type Origin = Origin; - type Call = (); + type Call = Call; type Index = u64; type BlockNumber = u64; type Hash = H256; @@ -1570,7 +1666,7 @@ mod tests { type System = Module; - const CALL: &::Call = &(); + const CALL: &::Call = &Call {}; fn new_test_ext() -> 
sp_io::TestExternalities { GenesisConfig::default().build_storage::().unwrap().into() @@ -1643,6 +1739,8 @@ mod tests { &Default::default(), InitKind::Full, ); + System::deposit_event(32u16); + System::note_finished_initialize(); System::deposit_event(42u16); System::note_applied_extrinsic(&Ok(()), 0, Default::default()); System::note_applied_extrinsic(&Err(DispatchError::BadOrigin), 0, Default::default()); @@ -1652,6 +1750,7 @@ mod tests { assert_eq!( System::events(), vec![ + EventRecord { phase: Phase::Initialization, event: 32u16, topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(0), event: 42u16, topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(0), event: 100u16, topics: vec![] }, EventRecord { phase: Phase::ApplyExtrinsic(1), event: 101u16, topics: vec![] }, @@ -1768,14 +1867,14 @@ mod tests { let info = DispatchInfo::default(); let len = 0_usize; // stale - assert!(CheckNonce::(0).validate(&1, CALL, info, len).is_err()); - assert!(CheckNonce::(0).pre_dispatch(&1, CALL, info, len).is_err()); + assert!(CheckNonce::(0).validate(&1, CALL, &info, len).is_err()); + assert!(CheckNonce::(0).pre_dispatch(&1, CALL, &info, len).is_err()); // correct - assert!(CheckNonce::(1).validate(&1, CALL, info, len).is_ok()); - assert!(CheckNonce::(1).pre_dispatch(&1, CALL, info, len).is_ok()); + assert!(CheckNonce::(1).validate(&1, CALL, &info, len).is_ok()); + assert!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len).is_ok()); // future - assert!(CheckNonce::(5).validate(&1, CALL, info, len).is_ok()); - assert!(CheckNonce::(5).pre_dispatch(&1, CALL, info, len).is_err()); + assert!(CheckNonce::(5).validate(&1, CALL, &info, len).is_ok()); + assert!(CheckNonce::(5).pre_dispatch(&1, CALL, &info, len).is_err()); }) } @@ -1800,9 +1899,9 @@ mod tests { if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } }; - reset_check_weight(small, false, 0); - reset_check_weight(medium, false, 0); - reset_check_weight(big, true, 1); + reset_check_weight(&small, false, 0); + reset_check_weight(&medium, false, 0); + reset_check_weight(&big, true, 1); }) } @@ -1813,7 +1912,7 @@ mod tests { let len = 0_usize; assert_eq!(System::all_extrinsics_weight(), 0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, free, len); + let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len); assert!(r.is_ok()); assert_eq!(System::all_extrinsics_weight(), 0); }) @@ -1827,7 +1926,7 @@ mod tests { let normal_limit = normal_weight_limit(); assert_eq!(System::all_extrinsics_weight(), 0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, max, len); + let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &max, len); assert!(r.is_ok()); assert_eq!(System::all_extrinsics_weight(), normal_limit); }) @@ -1844,15 +1943,15 @@ mod tests { // given almost full block AllExtrinsicsWeight::put(normal_limit); // will not fit. - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, normal, len).is_err()); + assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); // will fit. - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, op, len).is_ok()); + assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); // likewise for length limit. 
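// ---------------------------------------------------------------------------
// Editorial sketch (not part of the patch): the `DispatchClass::Mandatory`
// rules that the `CheckWeight` changes above implement, collected in one
// place. The types come from this branch (`frame_support::weights` and
// `sp_runtime::transaction_validity`); the three helper functions are
// illustrative names only.

use frame_support::weights::DispatchClass;
use sp_runtime::transaction_validity::InvalidTransaction;

/// Mandatory extrinsics are inherent-only: reject them when they arrive as a
/// signed extrinsic or through transaction validation.
fn reject_mandatory_transaction(class: DispatchClass) -> Result<(), InvalidTransaction> {
	if class == DispatchClass::Mandatory {
		return Err(InvalidTransaction::MandatoryDispatch);
	}
	Ok(())
}

/// Pool ordering hint: normal dispatches are prioritised by weight,
/// operational ones always win, mandatory ones never compete.
fn priority_of(class: DispatchClass, weight: u32) -> u64 {
	match class {
		DispatchClass::Normal => weight as u64,
		DispatchClass::Operational => u64::max_value(),
		DispatchClass::Mandatory => u64::min_value(),
	}
}

/// A mandatory extrinsic that returns an error poisons the block
/// (`BadMandatory`), since it was never weight-checked up front.
fn check_mandatory_result(class: DispatchClass, failed: bool) -> Result<(), InvalidTransaction> {
	if class == DispatchClass::Mandatory && failed {
		return Err(InvalidTransaction::BadMandatory);
	}
	Ok(())
}
// ---------------------------------------------------------------------------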
let len = 100_usize; AllExtrinsicsLen::put(normal_length_limit()); - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, normal, len).is_err()); - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, op, len).is_ok()); + assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); + assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); }) } @@ -1864,13 +1963,13 @@ mod tests { let len = 0_usize; let priority = CheckWeight::(PhantomData) - .validate(&1, CALL, normal, len) + .validate(&1, CALL, &normal, len) .unwrap() .priority; assert_eq!(priority, 100); let priority = CheckWeight::(PhantomData) - .validate(&1, CALL, op, len) + .validate(&1, CALL, &op, len) .unwrap() .priority; assert_eq!(priority, u64::max_value()); @@ -1888,16 +1987,16 @@ mod tests { if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } }; - reset_check_weight(normal, normal_limit - 1, false); - reset_check_weight(normal, normal_limit, false); - reset_check_weight(normal, normal_limit + 1, true); + reset_check_weight(&normal, normal_limit - 1, false); + reset_check_weight(&normal, normal_limit, false); + reset_check_weight(&normal, normal_limit + 1, true); // Operational ones don't have this limit. let op = DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: true }; - reset_check_weight(op, normal_limit, false); - reset_check_weight(op, normal_limit + 100, false); - reset_check_weight(op, 1024, false); - reset_check_weight(op, 1025, true); + reset_check_weight(&op, normal_limit, false); + reset_check_weight(&op, normal_limit + 100, false); + reset_check_weight(&op, 1024, false); + reset_check_weight(&op, 1025, true); }) } @@ -1929,7 +2028,7 @@ mod tests { System::set_block_number(17); >::insert(16, H256::repeat_byte(1)); - assert_eq!(ext.validate(&1, CALL, normal, len).unwrap().longevity, 15); + assert_eq!(ext.validate(&1, CALL, &normal, len).unwrap().longevity, 15); }) } @@ -1988,6 +2087,7 @@ mod tests { let mut ext = new_test_ext(); ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); ext.execute_with(|| { + System::set_block_number(1); System::set_code( RawOrigin::Root.into(), substrate_test_runtime_client::runtime::WASM_BINARY.to_vec(), @@ -1995,7 +2095,7 @@ mod tests { assert_eq!( System::events(), - vec![EventRecord { phase: Phase::ApplyExtrinsic(0), event: 102u16, topics: vec![] }], + vec![EventRecord { phase: Phase::Initialization, event: 102u16, topics: vec![] }], ); }); } @@ -2015,4 +2115,18 @@ mod tests { ).unwrap(); }); } + + #[test] + fn events_not_emitted_during_genesis() { + new_test_ext().execute_with(|| { + // Block Number is zero at genesis + assert!(System::block_number().is_zero()); + System::on_created_account(Default::default()); + assert!(System::events().is_empty()); + // Events will be emitted starting on block 1 + System::set_block_number(1); + System::on_created_account(Default::default()); + assert!(System::events().len() == 1); + }); + } } diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index 5abafe7655172c8b41d81a197d9510730df83f68..a3fe3e00ca4be879ed6074cc6b13f75e0fbae83f 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -95,7 +95,8 @@ impl Signer for TAnyAppPubl } /// Retrieves a public key type for given `SignAndSubmitTransaction`. -pub type PublicOf = < +pub type PublicOf = +< >::CreateTransaction as CreateTransaction>::Extrinsic> @@ -109,7 +110,7 @@ pub type PublicOf = < /// you should use. 
pub trait SignAndSubmitTransaction { /// Unchecked extrinsic type. - type Extrinsic: ExtrinsicT + codec::Encode; + type Extrinsic: ExtrinsicT + Encode; /// A runtime-specific type to produce signed data for the extrinsic. type CreateTransaction: CreateTransaction; @@ -156,7 +157,7 @@ pub trait SignAndSubmitTransaction { /// you should use. pub trait SubmitUnsignedTransaction { /// Unchecked extrinsic type. - type Extrinsic: ExtrinsicT + codec::Encode; + type Extrinsic: ExtrinsicT + Encode; /// Submit given call to the transaction pool as unsigned transaction. /// @@ -164,7 +165,8 @@ pub trait SubmitUnsignedTransaction { /// and `Err` if transaction was rejected from the pool. fn submit_unsigned(call: impl Into) -> Result<(), ()> { let xt = Self::Extrinsic::new(call.into(), None).ok_or(())?; - sp_io::offchain::submit_transaction(xt.encode()) + let encoded_xt = xt.encode(); + sp_io::offchain::submit_transaction(encoded_xt) } } @@ -291,7 +293,7 @@ impl SignAndSubmitTransaction for TransactionSubmitte T: crate::Trait, C: CreateTransaction, S: Signer<>::Public, >::Signature>, - E: ExtrinsicT + codec::Encode, + E: ExtrinsicT + Encode, { type Extrinsic = E; type CreateTransaction = C; @@ -301,7 +303,7 @@ impl SignAndSubmitTransaction for TransactionSubmitte /// A blanket implementation to use the same submitter for unsigned transactions as well. impl SubmitUnsignedTransaction for TransactionSubmitter where T: crate::Trait, - E: ExtrinsicT + codec::Encode, + E: ExtrinsicT + Encode, { type Extrinsic = E; } @@ -310,7 +312,7 @@ impl SubmitUnsignedTransaction for TransactionSubmitt impl SubmitSignedTransaction for TransactionSubmitter where T: crate::Trait, C: CreateTransaction, - E: ExtrinsicT + codec::Encode, + E: ExtrinsicT + Encode, S: Signer<>::Public, >::Signature>, // Make sure we can unwrap the app crypto key. 
S: RuntimeAppPublic + AppPublic + Into<::Generic>, diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index ff5b72de670e6cfd364c008e82ff3b0ec168be2d..eb7358e197a189dd33b55e985c1b9a65b86a72d8 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-timestamp" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,20 +12,20 @@ documentation = "https://docs.rs/pallet-timestamp" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io", optional = true } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../benchmarking", optional = true } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/timestamp" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io", optional = true } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/timestamp" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-alpha.5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } [features] default = ["std"] @@ -41,3 +41,6 @@ std = [ "sp-timestamp/std" ] runtime-benchmarks = ["frame-benchmarking", "sp-io"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index 5f69cdbe7e0463d0698b2e9fc12df536c51dc9e9..01a3d502a818e5cca2a166e5fd0a08b1e7ed6b74 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -16,10 +16,10 @@ //! Timestamp pallet benchmarking. -use super::*; +#![cfg(feature = "runtime-benchmarks")] +use super::*; use sp_std::prelude::*; - use frame_system::RawOrigin; use frame_benchmarking::benchmarks; @@ -34,3 +34,17 @@ benchmarks! 
{ let n in ...; }: _(RawOrigin::None, n.into()) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set::()); + }); + } +} diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 2a37dfdddb62a89fc7a3aea9f2ce9ef8880421a7..6df8b4606565038d1df8fddce34000bcf3bb8521 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -69,6 +69,7 @@ //! //! decl_module! { //! pub struct Module for enum Call where origin: T::Origin { +//! #[weight = frame_support::weights::SimpleDispatchInfo::default()] //! pub fn get_time(origin) -> dispatch::DispatchResult { //! let _sender = ensure_signed(origin)?; //! let _now = >::get(); @@ -90,20 +91,23 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "runtime-benchmarks")] mod benchmarking; use sp_std::{result, cmp}; use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier}; -use frame_support::{Parameter, decl_storage, decl_module}; -use frame_support::traits::{Time, Get}; +#[cfg(feature = "std")] +use frame_support::debug; +use frame_support::{ + Parameter, decl_storage, decl_module, + traits::{Time, UnixTime, Get}, + weights::SimpleDispatchInfo, +}; use sp_runtime::{ RuntimeString, traits::{ AtLeast32Bit, Zero, SaturatedConversion, Scale } }; -use frame_support::weights::SimpleDispatchInfo; use frame_system::ensure_none; use sp_timestamp::{ InherentError, INHERENT_IDENTIFIER, InherentType, @@ -143,7 +147,7 @@ decl_module! { /// `MinimumPeriod`. /// /// The dispatch origin for this call must be `Inherent`. - #[weight = SimpleDispatchInfo::FixedOperational(10_000)] + #[weight = SimpleDispatchInfo::FixedMandatory(10_000)] fn set(origin, #[compact] now: T::Moment) { ensure_none(origin)?; assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block"); @@ -239,6 +243,25 @@ impl Time for Module { } } +/// Before the timestamp inherent is applied, it returns the time of previous block. +/// +/// On genesis the time returned is not valid. +impl UnixTime for Module { + fn now() -> core::time::Duration { + // now is duration since unix epoch in millisecond as documented in + // `sp_timestamp::InherentDataProvider`. + let now = Self::now(); + sp_std::if_std! { + if now == T::Moment::zero() { + debug::error!( + "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0" + ); + } + } + core::time::Duration::from_millis(now.saturated_into::()) + } +} + #[cfg(test)] mod tests { use super::*; @@ -248,6 +271,11 @@ mod tests { use sp_core::H256; use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; + pub fn new_test_ext() -> TestExternalities { + let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + TestExternalities::new(t) + } + impl_outer_origin! 
{ pub enum Origin for Test where system = frame_system {} } @@ -293,8 +321,7 @@ mod tests { #[test] fn timestamp_works() { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - TestExternalities::new(t).execute_with(|| { + new_test_ext().execute_with(|| { Timestamp::set_timestamp(42); assert_ok!(Timestamp::dispatch(Call::set(69), Origin::NONE)); assert_eq!(Timestamp::now(), 69); @@ -304,8 +331,7 @@ mod tests { #[test] #[should_panic(expected = "Timestamp must be updated only once in the block")] fn double_timestamp_should_fail() { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - TestExternalities::new(t).execute_with(|| { + new_test_ext().execute_with(|| { Timestamp::set_timestamp(42); assert_ok!(Timestamp::dispatch(Call::set(69), Origin::NONE)); let _ = Timestamp::dispatch(Call::set(70), Origin::NONE); @@ -315,8 +341,7 @@ mod tests { #[test] #[should_panic(expected = "Timestamp must increment by at least between sequential blocks")] fn block_period_minimum_enforced() { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - TestExternalities::new(t).execute_with(|| { + new_test_ext().execute_with(|| { Timestamp::set_timestamp(42); let _ = Timestamp::dispatch(Call::set(46), Origin::NONE); }); diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index 3a291dc51631288b43415039c4c1e831800c2d84..811c2885b1bdaa674247c7e9510531efa8fe0e95 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,17 +9,17 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME pallet to manage transaction payments" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-alpha.2", default-features = false, path = "./rpc/runtime-api" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-alpha.5", default-features = false, path = "./rpc/runtime-api" } [dev-dependencies] -sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +sp-io = { version = "2.0.0-alpha.5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +pallet-balances = { 
version = "2.0.0-alpha.5", path = "../balances" } [features] default = ["std"] @@ -31,3 +31,6 @@ std = [ "frame-system/std", "pallet-transaction-payment-rpc-runtime-api/std" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 309dfeedd5be420bb84615c1e0701ef74b5a58e4..cd74829b29b049fb18b29a661b00bd8763c3c725 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment-rpc" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,14 +9,17 @@ repository = "https://github.com/paritytech/substrate/" description = "RPC interface for the transaction payment module." [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0" } +codec = { package = "parity-scale-codec", version = "1.3.0" } jsonrpc-core = "14.0.3" -jsonrpc-core-client = "14.0.3" +jsonrpc-core-client = "14.0.5" jsonrpc-derive = "14.0.3" -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0-alpha.2", path = "../../../primitives/rpc" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0-alpha.5", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-alpha.2", path = "./runtime-api" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-alpha.5", path = "./runtime-api" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 96716769a39a89802544498acaabf52bab2c701c..447590111c7ee65ee098a2a508647c5fe7cefc57 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,11 +10,11 @@ description = "RPC runtime API for transaction payment FRAME pallet" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../../support" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = 
"../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../../../support" } [dev-dependencies] serde_json = "1.0.41" @@ -29,3 +29,6 @@ std = [ "sp-runtime/std", "frame-support/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 3a1239630f9efdc29bfcb8ec6f42dbf251542637..7cf364d700f42142132c8df687ce3ad38884e498 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -44,12 +44,13 @@ use sp_runtime::{ TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError, TransactionValidity, }, - traits::{Zero, Saturating, SignedExtension, SaturatedConversion, Convert}, + traits::{ + Zero, Saturating, SignedExtension, SaturatedConversion, Convert, Dispatchable, + DispatchInfoOf, + }, }; use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; -mod migration; - type Multiplier = Fixed64; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -97,14 +98,12 @@ decl_module! { *fm = T::FeeMultiplierUpdate::convert(*fm) }); } - - fn on_runtime_upgrade() { - migration::on_runtime_upgrade() - } } } -impl Module { +impl Module where + T::Call: Dispatchable, +{ /// Query the data that we know about the fee of a given `call`. /// /// As this module is not and cannot be aware of the internals of a signed extension, it only @@ -112,11 +111,6 @@ impl Module { /// /// All dispatchables must be annotated with weight and will have some fee info. This function /// always returns. - // NOTE: we can actually make it understand `ChargeTransactionPayment`, but would be some hassle - // for sure. We have to make it aware of the index of `ChargeTransactionPayment` in `Extra`. - // Alternatively, we could actually execute the tx's per-dispatch and record the balance of the - // sender before and after the pipeline.. but this is way too much hassle for a very very little - // potential gain in the future. pub fn query_info( unchecked_extrinsic: Extrinsic, len: u32, @@ -125,10 +119,15 @@ impl Module { T: Send + Sync, BalanceOf: Send + Sync, { + // NOTE: we can actually make it understand `ChargeTransactionPayment`, but would be some + // hassle for sure. We have to make it aware of the index of `ChargeTransactionPayment` in + // `Extra`. Alternatively, we could actually execute the tx's per-dispatch and record the + // balance of the sender before and after the pipeline.. but this is way too much hassle for + // a very very little potential gain in the future. let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); let partial_fee = - >::compute_fee(len, dispatch_info, 0u32.into()); + >::compute_fee(len, &dispatch_info, 0u32.into()); let DispatchInfo { weight, class, .. } = dispatch_info; RuntimeDispatchInfo { weight, class, partial_fee } @@ -140,7 +139,9 @@ impl Module { #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); -impl ChargeTransactionPayment { +impl ChargeTransactionPayment where + T::Call: Dispatchable, +{ /// utility constructor. Used only in client/factory code. 
pub fn from(fee: BalanceOf) -> Self { Self(fee) @@ -162,7 +163,7 @@ impl ChargeTransactionPayment { /// final_fee = base_fee + targeted_fee_adjustment(len_fee + weight_fee) + tip; pub fn compute_fee( len: u32, - info: ::DispatchInfo, + info: &DispatchInfoOf, tip: BalanceOf, ) -> BalanceOf where @@ -206,14 +207,14 @@ impl sp_std::fmt::Debug for ChargeTransactionPayment } } -impl SignedExtension for ChargeTransactionPayment - where BalanceOf: Send + Sync +impl SignedExtension for ChargeTransactionPayment where + BalanceOf: Send + Sync, + T::Call: Dispatchable, { const IDENTIFIER: &'static str = "ChargeTransactionPayment"; type AccountId = T::AccountId; type Call = T::Call; type AdditionalSigned = (); - type DispatchInfo = DispatchInfo; type Pre = (); fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } @@ -221,7 +222,7 @@ impl SignedExtension for ChargeTransactionPayment &self, who: &Self::AccountId, _call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { // pay any fees. @@ -444,14 +445,14 @@ mod tests { let len = 10; assert!( ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, info_from_weight(5), len) + .pre_dispatch(&1, CALL, &info_from_weight(5), len) .is_ok() ); assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); assert!( ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, info_from_weight(3), len) + .pre_dispatch(&2, CALL, &info_from_weight(3), len) .is_ok() ); assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 3 - 5); @@ -469,7 +470,7 @@ mod tests { // maximum weight possible assert!( ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, info_from_weight(Weight::max_value()), 10) + .pre_dispatch(&1, CALL, &info_from_weight(Weight::max_value()), 10) .is_ok() ); // fee will be proportional to what is the actual maximum weight in the runtime. 
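The `compute_fee` documentation above states the formula `final_fee = base_fee + targeted_fee_adjustment(len_fee + weight_fee) + tip`, and the `pre_dispatch` test in this hunk charges account 1 exactly `5 + 5 + 10` for a weight-5, 10-byte extrinsic with no tip. A standalone sketch of that arithmetic follows, using plain integers in place of the pallet's `BalanceOf<T>`/`Fixed64` types; the constants (base fee 5 / byte fee 1 in the first case, base fee 100 / byte fee 10 and a +0.5 multiplier in the second) are assumptions read off the test expectations here and in the `compute_fee` hunks further down, since the mock's actual parameter setup is outside this diff.

```rust
/// Framework-free sketch of the transaction fee formula described above.
/// `multiplier_permille` stands in for the pallet's `Fixed64` fee multiplier,
/// expressed in parts per thousand (0 = no adjustment, 500 = +50%).
fn compute_fee_sketch(
    base_fee: u64,
    byte_fee: u64,
    len: u64,
    weight_fee: u64,
    multiplier_permille: u64,
    tip: u64,
) -> u64 {
    // Adjustable portion: length fee plus weight fee.
    let adjustable = byte_fee * len + weight_fee;
    // targeted_fee_adjustment: the adjustable portion scaled by the multiplier.
    let adjusted = adjustable + adjustable * multiplier_permille / 1000;
    base_fee + adjusted + tip
}

fn main() {
    // Mirrors the `pre_dispatch` test above: base 5, one unit per byte, len 10,
    // weight 5 mapped 1:1 to fee, no multiplier, no tip => 20 units charged
    // (the `100 - 5 - 5 - 10` assertion).
    assert_eq!(compute_fee_sketch(5, 1, 10, 5, 0, 0), 20);

    // Mirrors the worked example in the compute_fee tests in the next hunks:
    //   adjustable = 123 + 456 * 10 = 4683
    //   adjusted   = 4683 + 2341   = 7024 (7024.5 truncated)
    //   final      = 100 + 7024 + 789 tip = 7913
    assert_eq!(compute_fee_sketch(100, 10, 456, 123, 500, 789), 7913);
}
```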
@@ -501,7 +502,7 @@ mod tests { }; assert!( ChargeTransactionPayment::::from(0) - .validate(&1, CALL, operational_transaction , len) + .validate(&1, CALL, &operational_transaction , len) .is_ok() ); @@ -513,7 +514,7 @@ mod tests { }; assert!( ChargeTransactionPayment::::from(0) - .validate(&1, CALL, free_transaction , len) + .validate(&1, CALL, &free_transaction , len) .is_err() ); }); @@ -533,7 +534,7 @@ mod tests { assert!( ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, info_from_weight(3), len) + .pre_dispatch(&1, CALL, &info_from_weight(3), len) .is_ok() ); assert_eq!(Balances::free_balance(1), 100 - 10 - 5 - (10 + 3) * 3 / 2); @@ -593,25 +594,25 @@ mod tests { class: DispatchClass::Operational, pays_fee: false, }; - assert_eq!(ChargeTransactionPayment::::compute_fee(0, dispatch_info, 10), 10); + assert_eq!(ChargeTransactionPayment::::compute_fee(0, &dispatch_info, 10), 10); // No tip, only base fee works let dispatch_info = DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: true, }; - assert_eq!(ChargeTransactionPayment::::compute_fee(0, dispatch_info, 0), 100); + assert_eq!(ChargeTransactionPayment::::compute_fee(0, &dispatch_info, 0), 100); // Tip + base fee works - assert_eq!(ChargeTransactionPayment::::compute_fee(0, dispatch_info, 69), 169); + assert_eq!(ChargeTransactionPayment::::compute_fee(0, &dispatch_info, 69), 169); // Len (byte fee) + base fee works - assert_eq!(ChargeTransactionPayment::::compute_fee(42, dispatch_info, 0), 520); + assert_eq!(ChargeTransactionPayment::::compute_fee(42, &dispatch_info, 0), 520); // Weight fee + base fee works let dispatch_info = DispatchInfo { weight: 1000, class: DispatchClass::Operational, pays_fee: true, }; - assert_eq!(ChargeTransactionPayment::::compute_fee(0, dispatch_info, 0), 1100); + assert_eq!(ChargeTransactionPayment::::compute_fee(0, &dispatch_info, 0), 1100); }); } @@ -632,7 +633,7 @@ mod tests { class: DispatchClass::Operational, pays_fee: true, }; - assert_eq!(ChargeTransactionPayment::::compute_fee(0, dispatch_info, 0), 100); + assert_eq!(ChargeTransactionPayment::::compute_fee(0, &dispatch_info, 0), 100); // Everything works together :) let dispatch_info = DispatchInfo { @@ -644,7 +645,7 @@ mod tests { // adjustable fee = (123 * 1) + (456 * 10) = 4683 // adjusted fee = (4683 * .5) + 4683 = 7024.5 -> 7024 // final fee = 100 + 7024 + 789 tip = 7913 - assert_eq!(ChargeTransactionPayment::::compute_fee(456, dispatch_info, 789), 7913); + assert_eq!(ChargeTransactionPayment::::compute_fee(456, &dispatch_info, 789), 7913); }); } @@ -666,7 +667,7 @@ mod tests { assert_eq!( ChargeTransactionPayment::::compute_fee( ::max_value(), - dispatch_info, + &dispatch_info, ::max_value() ), ::max_value() diff --git a/frame/transaction-payment/src/migration.rs b/frame/transaction-payment/src/migration.rs deleted file mode 100644 index 6db3cfd0f9b6003a3a1d4722c19c02ef057ae465..0000000000000000000000000000000000000000 --- a/frame/transaction-payment/src/migration.rs +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
- -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Migration code to update storage. - -use super::*; -use frame_support::storage::migration::{put_storage_value, take_storage_value}; - -pub fn on_runtime_upgrade() { - change_name_balances_to_transaction_payment() -} - -// Change the storage name used by this pallet from `Balances` to `TransactionPayment`. -// -// Since the format of the storage items themselves have not changed, we do not -// need to keep track of a storage version. If the runtime does not need to be -// upgraded, nothing here will happen anyway. - -fn change_name_balances_to_transaction_payment() { - sp_runtime::print("Migrating Transaction Payment."); - - if let Some(next_fee_multiplier) = take_storage_value::(b"Balances", b"NextFeeMultiplier", &[]) { - put_storage_value(b"TransactionPayment", b"NextFeeMultiplier", &[], next_fee_multiplier); - } -} diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index 99071e927635e0948bc47a065589e142396eab7e..6951c0eba9ae7bd566681e385ab8bf7ca9400718 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-treasury" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,16 +10,18 @@ description = "FRAME pallet to manage treasury" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../balances" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0-alpha.5", default-features = false, path = "../balances" } + +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-alpha.5", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } [features] default = ["std"] @@ -32,3 +34,10 @@ std = [ "frame-system/std", "pallet-balances/std", ] +runtime-benchmarks = [ + "frame-benchmarking", + 
"frame-support/runtime-benchmarks", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..f901576c95d4b0c8d384737502654672ccd8a795 --- /dev/null +++ b/frame/treasury/src/benchmarking.rs @@ -0,0 +1,241 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Treasury pallet benchmarking. + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; + +use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, account}; +use frame_support::traits::OnInitialize; + +use crate::Module as Treasury; + +const SEED: u32 = 0; + +// Create the pre-requisite information needed to create a treasury `propose_spend`. +fn setup_proposal(u: u32) -> ( + T::AccountId, + BalanceOf, + ::Source, +) { + let caller = account("caller", u, SEED); + let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100.into()); + let _ = T::Currency::make_free_balance_be(&caller, value); + let beneficiary = account("beneficiary", u, SEED); + let beneficiary_lookup = T::Lookup::unlookup(beneficiary); + (caller, value, beneficiary_lookup) +} + +// Create the pre-requisite information needed to create a `report_awesome`. +fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId) { + let caller = account("caller", 0, SEED); + let value = T::TipReportDepositBase::get() + + T::TipReportDepositPerByte::get() * length.into() + + T::Currency::minimum_balance(); + let _ = T::Currency::make_free_balance_be(&caller, value); + let reason = vec![0; length as usize]; + let awesome_person = account("awesome", 0, SEED); + (caller, reason, awesome_person) +} + +// Create the pre-requisite information needed to call `tip_new`. +fn setup_tip(r: u32, t: u32) -> + Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> +{ + let tippers_count = T::Tippers::count(); + + for i in 0 .. t { + let member = account("member", i, SEED); + T::Tippers::add(&member); + ensure!(T::Tippers::contains(&member), "failed to add tipper"); + } + + ensure!(T::Tippers::count() == tippers_count + t as usize, "problem creating tippers"); + let caller = account("member", t - 1, SEED); + let reason = vec![0; r as usize]; + let beneficiary = account("beneficiary", t, SEED); + let value = T::Currency::minimum_balance().saturating_mul(100.into()); + Ok((caller, reason, beneficiary, value)) +} + +// Create `t` new tips for the tip proposal with `hash`. +// This function automatically makes the tip able to close. +fn create_tips(t: u32, hash: T::Hash, value: BalanceOf) -> Result<(), &'static str> { + for i in 0 .. 
t { + let caller = account("member", i, SEED); + ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); + Treasury::::tip(RawOrigin::Signed(caller).into(), hash, value)?; + } + Tips::::mutate(hash, |maybe_tip| { + if let Some(open_tip) = maybe_tip { + open_tip.closes = Some(T::BlockNumber::zero()); + } + }); + Ok(()) +} + +// Create proposals that are approved for use in `on_initialize`. +fn create_approved_proposals(n: u32) -> Result<(), &'static str> { + for i in 0 .. n { + let (caller, value, lookup) = setup_proposal::(i); + Treasury::::propose_spend( + RawOrigin::Signed(caller).into(), + value, + lookup + )?; + let proposal_id = ProposalCount::get() - 1; + Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; + } + ensure!(Approvals::get().len() == n as usize, "Not all approved"); + Ok(()) +} + +const MAX_BYTES: u32 = 16384; +const MAX_TIPPERS: u32 = 100; + +benchmarks! { + _ { } + + propose_spend { + let u in 0 .. 1000; + let (caller, value, beneficiary_lookup) = setup_proposal::(u); + }: _(RawOrigin::Signed(caller), value, beneficiary_lookup) + + reject_proposal { + let u in 0 .. 1000; + let (caller, value, beneficiary_lookup) = setup_proposal::(u); + Treasury::::propose_spend( + RawOrigin::Signed(caller).into(), + value, + beneficiary_lookup + )?; + let proposal_id = ProposalCount::get() - 1; + }: _(RawOrigin::Root, proposal_id) + + approve_proposal { + let u in 0 .. 1000; + let (caller, value, beneficiary_lookup) = setup_proposal::(u); + Treasury::::propose_spend( + RawOrigin::Signed(caller).into(), + value, + beneficiary_lookup + )?; + let proposal_id = ProposalCount::get() - 1; + }: _(RawOrigin::Root, proposal_id) + + report_awesome { + let r in 0 .. MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + }: _(RawOrigin::Signed(caller), reason, awesome_person) + + retract_tip { + let r in 0 .. MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + Treasury::::report_awesome( + RawOrigin::Signed(caller.clone()).into(), + reason.clone(), + awesome_person.clone() + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); + }: _(RawOrigin::Signed(caller), hash) + + tip_new { + let r in 0 .. MAX_BYTES; + let t in 1 .. MAX_TIPPERS; + + let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + }: _(RawOrigin::Signed(caller), reason, beneficiary, value) + + tip { + let t in 1 .. MAX_TIPPERS; + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100.into()); + Treasury::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t - 1, hash.clone(), value)?; + let caller = account("member", t - 1, SEED); + }: _(RawOrigin::Signed(caller), hash, value) + + close_tip { + let t in 1 .. 
MAX_TIPPERS; + + // Make sure pot is funded + let pot_account = Treasury::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); + + // Set up a new tip proposal + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100.into()); + Treasury::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + + // Create a bunch of tips + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t, hash.clone(), value)?; + + let caller = account("caller", t, SEED); + }: _(RawOrigin::Signed(caller), hash) + + on_initialize { + let p in 0 .. 100; + let pot_account = Treasury::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); + create_approved_proposals::(p)?; + }: { + Treasury::::on_initialize(T::BlockNumber::zero()); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_propose_spend::()); + assert_ok!(test_benchmark_reject_proposal::()); + assert_ok!(test_benchmark_approve_proposal::()); + assert_ok!(test_benchmark_report_awesome::()); + assert_ok!(test_benchmark_retract_tip::()); + assert_ok!(test_benchmark_tip_new::()); + assert_ok!(test_benchmark_tip::()); + assert_ok!(test_benchmark_close_tip::()); + assert_ok!(test_benchmark_on_initialize::()); + }); + } +} diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index bbf31cc599da0ec63ce8f1fd77fcd917bc223d80..f07b9b511e1682edfd3c450239ed95eef2f546a2 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -92,15 +92,19 @@ use serde::{Serialize, Deserialize}; use sp_std::prelude::*; use frame_support::{decl_module, decl_storage, decl_event, ensure, print, decl_error, Parameter}; use frame_support::traits::{ - Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, ExistenceRequirement::AllowDeath, + Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::KeepAlive, ReservableCurrency, WithdrawReason }; use sp_runtime::{Permill, ModuleId, Percent, RuntimeDebug, traits::{ - Zero, EnsureOrigin, StaticLookup, AccountIdConversion, Saturating, Hash, BadOrigin + Zero, StaticLookup, AccountIdConversion, Saturating, Hash, BadOrigin }}; -use frame_support::{weights::SimpleDispatchInfo, traits::Contains}; +use frame_support::weights::{Weight, WeighData, SimpleDispatchInfo}; +use frame_support::traits::{Contains, EnsureOrigin}; use codec::{Encode, Decode}; -use frame_system::{self as system, ensure_signed}; +use frame_system::{self as system, ensure_signed, ensure_root}; + +mod tests; +mod benchmarking; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; type PositiveImbalanceOf = <::Currency as Currency<::AccountId>>::PositiveImbalance; @@ -200,7 +204,9 @@ decl_storage! { ProposalCount get(fn proposal_count): ProposalIndex; /// Proposals that have been made. 
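The `benchmarks!` block above drives each treasury extrinsic across a component range (`let p in 0 .. 100`, `let t in 1 .. MAX_TIPPERS`, and so on) after building worst-case state with helpers such as `create_approved_proposals`. The sketch below is a rough, framework-free analogue of what the `on_initialize` benchmark measures; `ToyTreasury`, its fields, and the wall-clock timing are purely illustrative, whereas the real `frame_benchmarking` macro records extrinsic weights per component value inside the runtime.

```rust
use std::time::Instant;

/// Toy stand-in for the treasury's pot and approved-proposal queue.
struct ToyTreasury {
    pot: u64,
    approvals: Vec<u64>, // approved spend amounts
}

impl ToyTreasury {
    /// Rough analogue of the work done on a spend block: pay out every
    /// approved proposal that still fits in the pot, keep the rest.
    fn spend_funds(&mut self) {
        let mut remaining = Vec::new();
        for &value in &self.approvals {
            if value <= self.pot {
                self.pot -= value; // award the proposal
            } else {
                remaining.push(value); // revisit in a later spend period
            }
        }
        self.approvals = remaining;
    }
}

fn main() {
    // Like the benchmark's component `p in 0 .. 100`: observe how the cost of
    // the spend step grows with the number of approved proposals.
    for p in [0usize, 25, 50, 100] {
        let mut treasury = ToyTreasury { pot: 1_000_000_000, approvals: vec![1; p] };
        let start = Instant::now();
        treasury.spend_funds();
        println!("p = {:3} approved proposals -> {:?}", p, start.elapsed());
    }
}
```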
- Proposals get(fn proposals): map hasher(blake2_256) ProposalIndex => Option>>; + Proposals get(fn proposals): + map hasher(twox_64_concat) ProposalIndex + => Option>>; /// Proposal indices that have been approved but not yet awarded. Approvals get(fn approvals): Vec; @@ -208,12 +214,13 @@ decl_storage! { /// Tips that are not yet completed. Keyed by the hash of `(reason, who)` from the value. /// This has the insecure enumerable hash function since the key itself is already /// guaranteed to be a secure hash. - pub Tips get(fn tips): map hasher(twox_64_concat) T::Hash + pub Tips get(fn tips): + map hasher(twox_64_concat) T::Hash => Option, T::BlockNumber, T::Hash>>; /// Simple preimage lookup from the reason's hash to the original data. Again, has an /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. - pub Reasons get(fn reasons): map hasher(twox_64_concat) T::Hash => Option>; + pub Reasons get(fn reasons): map hasher(identity) T::Hash => Option>; } add_extra_genesis { build(|_config| { @@ -349,9 +356,11 @@ decl_module! { /// # #[weight = SimpleDispatchInfo::FixedOperational(100_000)] fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { - T::RejectOrigin::ensure_origin(origin)?; - let proposal = >::take(&proposal_id).ok_or(Error::::InvalidProposalIndex)?; + T::RejectOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + let proposal = >::take(&proposal_id).ok_or(Error::::InvalidProposalIndex)?; let value = proposal.bond; let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; T::ProposalRejection::on_unbalanced(imbalance); @@ -369,10 +378,11 @@ decl_module! { /// # #[weight = SimpleDispatchInfo::FixedOperational(100_000)] fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { - T::ApproveOrigin::ensure_origin(origin)?; + T::ApproveOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; ensure!(>::contains_key(proposal_id), Error::::InvalidProposalIndex); - Approvals::mutate(|v| v.push(proposal_id)); } @@ -542,11 +552,13 @@ decl_module! { Self::payout_tip(tip); } - fn on_finalize(n: T::BlockNumber) { + fn on_initialize(n: T::BlockNumber) -> Weight { // Check to see if we should spend some funds! if (n % T::SpendPeriod::get()).is_zero() { Self::spend_funds(); } + + SimpleDispatchInfo::default().weigh_data(()) } } } @@ -620,7 +632,7 @@ impl Module { Self::retain_active_tips(&mut tips); tips.sort_by_key(|i| i.1); let treasury = Self::account_id(); - let max_payout = T::Currency::free_balance(&treasury); + let max_payout = Self::pot(); let mut payout = tips[tips.len() / 2].1.min(max_payout); if let Some((finder, deposit)) = tip.finder { let _ = T::Currency::unreserve(&finder, deposit); @@ -630,11 +642,11 @@ impl Module { payout -= finders_fee; // this should go through given we checked it's at most the free balance, but still // we only make a best-effort. - let _ = T::Currency::transfer(&treasury, &finder, finders_fee, AllowDeath); + let _ = T::Currency::transfer(&treasury, &finder, finders_fee, KeepAlive); } } // same as above: best-effort only. - let _ = T::Currency::transfer(&treasury, &tip.who, payout, AllowDeath); + let _ = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); } // Spend some money! @@ -686,7 +698,7 @@ impl Module { &Self::account_id(), imbalance, WithdrawReason::Transfer.into(), - ExistenceRequirement::KeepAlive + KeepAlive ) { print("Inconsistent state - couldn't settle imbalance for funds spent by treasury"); // Nothing else to do here. 
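The `payout_tip` change in the hunk above caps the payout at `Self::pot()` instead of the treasury's raw free balance, while keeping the median-of-declared-tips rule with a finder's fee carved out. Below is a plain-Rust sketch of that payout rule, checked against the tip tests that appear later in this diff; the 20% fee and the balances are the mock runtime's values, not anything new.

```rust
/// Standalone sketch of the tip payout rule in `payout_tip` above.
/// Returns (finder_payout, beneficiary_payout).
fn payout_tip_sketch(mut tips: Vec<u64>, pot: u64, finders_fee_percent: u64) -> (u64, u64) {
    // The pallet sorts the declared tip values and pays out the median,
    // capped at what the treasury pot can afford.
    tips.sort();
    let mut payout = tips[tips.len() / 2].min(pot);
    // If the tip was opened by a finder, they keep a percentage of the payout
    // (TipFindersFee, 20% in the mock) and the beneficiary gets the rest.
    let finders_fee = payout * finders_fee_percent / 100;
    payout -= finders_fee;
    (finders_fee, payout)
}

fn main() {
    // Mirrors `report_awesome_and_tip_works`: three tippers each declare 10,
    // so the median is 10; the finder keeps 20% (2) and the beneficiary gets 8.
    assert_eq!(payout_tip_sketch(vec![10, 10, 10], 100, 20), (2, 8));

    // Mirrors `tip_median_calculation_works`: declared tips of 0, 10 and
    // 1_000_000 still pay out the median of 10. A tip opened directly via
    // `tip_new` has no finder, so no fee is deducted (hence the full 10).
    assert_eq!(payout_tip_sketch(vec![0, 10, 1_000_000], 100, 0), (0, 10));
}
```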
@@ -715,456 +727,3 @@ impl OnUnbalanced> for Module { Self::deposit_event(RawEvent::Deposit(numeric_amount)); } } - -#[cfg(test)] -mod tests { - use super::*; - - use frame_support::{assert_noop, assert_ok, impl_outer_origin, parameter_types, weights::Weight}; - use frame_support::traits::Contains; - use sp_core::H256; - use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, OnFinalize, IdentityLookup, BadOrigin}, - }; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; -} - impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - pub struct TenToFourteen; - impl Contains for TenToFourteen { - fn contains(n: &u64) -> bool { - *n >= 10 && *n <= 14 - } - fn sorted_members() -> Vec { - vec![10, 11, 12, 13, 14] - } - } - parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: u64 = 1; - pub const SpendPeriod: u64 = 2; - pub const Burn: Permill = Permill::from_percent(50); - pub const TipCountdown: u64 = 1; - pub const TipFindersFee: Percent = Percent::from_percent(20); - pub const TipReportDepositBase: u64 = 1; - pub const TipReportDepositPerByte: u64 = 1; - } - impl Trait for Test { - type Currency = pallet_balances::Module; - type ApproveOrigin = frame_system::EnsureRoot; - type RejectOrigin = frame_system::EnsureRoot; - type Tippers = TenToFourteen; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type TipReportDepositPerByte = TipReportDepositPerByte; - type Event = (); - type ProposalRejection = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type SpendPeriod = SpendPeriod; - type Burn = Burn; - } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Treasury = Module; - - fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - // Total issuance will be 200 with treasury account initialized at ED. 
- balances: vec![(0, 100), (1, 98), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); - t.into() - } - - #[test] - fn genesis_config_works() { - new_test_ext().execute_with(|| { - assert_eq!(Treasury::pot(), 0); - assert_eq!(Treasury::proposal_count(), 0); - }); - } - - fn tip_hash() -> H256 { - BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u64)) - } - - #[test] - fn tip_new_cannot_be_used_twice() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - assert_noop!( - Treasury::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), - Error::::AlreadyKnown - ); - }); - } - - #[test] - fn report_awesome_and_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - - // other reports don't count. - assert_noop!( - Treasury::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), - Error::::AlreadyKnown - ); - - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::tip(Origin::signed(9), h.clone(), 10), BadOrigin); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 102); - assert_eq!(Balances::free_balance(3), 8); - }); - } - - #[test] - fn report_awesome_from_beneficiary_and_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u64)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 110); - }); - } - - #[test] - fn close_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); - - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::Premature); - - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::NONE, h.into()), BadOrigin); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - - assert_noop!(Treasury::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); - }); - } - - #[test] - fn 
retract_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); - assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone())); - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); - }); - } - - #[test] - fn tip_median_calculation_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000000)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); - } - - #[test] - fn tip_changing_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(13), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(14), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 100)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); - } - - #[test] - fn minting_works() { - new_test_ext().execute_with(|| { - // Check that accumulate works when we have Some value in Dummy already. 
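The `report_awesome` tests (in this relocated module and again in the new `tests.rs` below) reserve exactly 12 units from account 0. That figure falls out of the deposit formula also visible in the benchmarking helper `setup_awesome`: a flat `TipReportDepositBase` plus `TipReportDepositPerByte` per byte of the reason. A one-function sketch with the mock's constants:

```rust
/// Sketch of the deposit reserved from a finder in `report_awesome`:
/// a flat base plus a per-byte charge on the reason text.
fn report_deposit(base: u64, per_byte: u64, reason: &str) -> u64 {
    base + per_byte * reason.len() as u64
}

fn main() {
    // Mock values: TipReportDepositBase = 1, TipReportDepositPerByte = 1.
    // "awesome.dot" is 11 bytes, so the tests see exactly 12 units reserved
    // (free balance drops from 100 to 88 while reserved becomes 12).
    assert_eq!(report_deposit(1, 1, "awesome.dot"), 12);
}
```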
- Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - }); - } - - #[test] - fn spend_proposal_takes_min_deposit() { - new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); - assert_eq!(Balances::free_balance(0), 99); - assert_eq!(Balances::reserved_balance(0), 1); - }); - } - - #[test] - fn spend_proposal_takes_proportional_deposit() { - new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - }); - } - - #[test] - fn spend_proposal_fails_when_proposer_poor() { - new_test_ext().execute_with(|| { - assert_noop!( - Treasury::propose_spend(Origin::signed(2), 100, 3), - Error::::InsufficientProposersBalance, - ); - }); - } - - #[test] - fn accepted_spend_proposal_ignored_outside_spend_period() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - - >::on_finalize(1); - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Treasury::pot(), 100); - }); - } - - #[test] - fn unused_pot_should_diminish() { - new_test_ext().execute_with(|| { - let init_total_issuance = Balances::total_issuance(); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Balances::total_issuance(), init_total_issuance + 100); - - >::on_finalize(2); - assert_eq!(Treasury::pot(), 50); - assert_eq!(Balances::total_issuance(), init_total_issuance + 50); - }); - } - - #[test] - fn rejected_spend_proposal_ignored_on_spend_period() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); - - >::on_finalize(2); - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Treasury::pot(), 50); - }); - } - - #[test] - fn reject_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); - assert_noop!(Treasury::reject_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); - }); - } - - #[test] - fn reject_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!(Treasury::reject_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); - }); - } - - #[test] - fn accept_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!(Treasury::approve_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); - }); - } - - #[test] - fn accept_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); - assert_noop!(Treasury::approve_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); - }); - } - - #[test] - fn accepted_spend_proposal_enacted_on_spend_period() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - 
assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - - >::on_finalize(2); - assert_eq!(Balances::free_balance(3), 100); - assert_eq!(Treasury::pot(), 0); - }); - } - - #[test] - fn pot_underflow_should_not_diminish() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - - >::on_finalize(2); - assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - - let _ = Balances::deposit_into_existing(&Treasury::account_id(), 100).unwrap(); - >::on_finalize(4); - assert_eq!(Balances::free_balance(3), 150); // Fund has been spent - assert_eq!(Treasury::pot(), 25); // Pot has finally changed - }); - } - - // Treasury account doesn't get deleted if amount approved to spend is all its free balance. - // i.e. pot should not include existential deposit needed for account survival. - #[test] - fn treasury_account_doesnt_get_deleted() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - let treasury_balance = Balances::free_balance(&Treasury::account_id()); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - - >::on_finalize(2); - assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - - assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 1)); - - >::on_finalize(4); - assert_eq!(Treasury::pot(), 0); // Pot is emptied - assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there - }); - } - - // In case treasury account is not existing then it works fine. - // This is useful for chain that will just update runtime. - #[test] - fn inexistent_account_works() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - balances: vec![(0, 100), (1, 99), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); - // Treasury genesis config is not build thus treasury account does not exist - let mut t: sp_io::TestExternalities = t.into(); - - t.execute_with(|| { - assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist - assert_eq!(Treasury::pot(), 0); // Pot is empty - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 1)); - >::on_finalize(2); - assert_eq!(Treasury::pot(), 0); // Pot hasn't changed - assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed - - Balances::make_free_balance_be(&Treasury::account_id(), 100); - assert_eq!(Treasury::pot(), 99); // Pot now contains funds - assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist - - >::on_finalize(4); - - assert_eq!(Treasury::pot(), 0); // Pot has changed - assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed - }); - } -} diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..5ad78dcad79356ce57eb59305cdf3a96d65682c6 --- /dev/null +++ b/frame/treasury/src/tests.rs @@ -0,0 +1,481 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Treasury pallet tests. + +#![cfg(test)] + +use super::*; +use std::cell::RefCell; +use frame_support::{ + assert_noop, assert_ok, impl_outer_origin, parameter_types, weights::Weight, + traits::{Contains, OnInitialize} +}; +use sp_core::H256; +use sp_runtime::{ + Perbill, + testing::Header, + traits::{BlakeTwo256, IdentityLookup, BadOrigin}, +}; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +#[derive(Clone, Eq, PartialEq)] +pub struct Test; +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Trait for Test { + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} +thread_local! { + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); +} +pub struct TenToFourteen; +impl Contains for TenToFourteen { + fn sorted_members() -> Vec { + TEN_TO_FOURTEEN.with(|v| { + v.borrow().clone() + }) + } + #[cfg(feature = "runtime-benchmarks")] + fn add(new: &u64) { + TEN_TO_FOURTEEN.with(|v| { + let mut members = v.borrow_mut(); + members.push(*new); + members.sort(); + }) + } +} +parameter_types! 
{ + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: u64 = 1; + pub const SpendPeriod: u64 = 2; + pub const Burn: Permill = Permill::from_percent(50); + pub const TipCountdown: u64 = 1; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: u64 = 1; + pub const TipReportDepositPerByte: u64 = 1; +} +impl Trait for Test { + type Currency = pallet_balances::Module; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; + type Tippers = TenToFourteen; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type TipReportDepositPerByte = TipReportDepositPerByte; + type Event = (); + type ProposalRejection = (); + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; +} +type System = frame_system::Module; +type Balances = pallet_balances::Module; +type Treasury = Module; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ + // Total issuance will be 200 with treasury account initialized at ED. + balances: vec![(0, 100), (1, 98), (2, 1)], + }.assimilate_storage(&mut t).unwrap(); + GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); + t.into() +} + +#[test] +fn genesis_config_works() { + new_test_ext().execute_with(|| { + assert_eq!(Treasury::pot(), 0); + assert_eq!(Treasury::proposal_count(), 0); + }); +} + +fn tip_hash() -> H256 { + BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u64)) +} + +#[test] +fn tip_new_cannot_be_used_twice() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + assert_noop!( + Treasury::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), + Error::::AlreadyKnown + ); + }); +} + +#[test] +fn report_awesome_and_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + + // other reports don't count. 
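The new `tests.rs` replaces the old hard-coded `TenToFourteen` tipper set with a `thread_local!` + `RefCell` list so the runtime-benchmarks-only `add` hook can grow the membership while `sorted_members` keeps returning a sorted snapshot. A minimal standalone version of that pattern is sketched below with free functions instead of the pallet's `Contains` trait impl; `contains` here mimics the trait's default binary-search-over-sorted-members behaviour.

```rust
use std::cell::RefCell;

// Same pattern as the `TenToFourteen` mock above: a thread-local,
// interior-mutable membership list.
thread_local! {
    static MEMBERS: RefCell<Vec<u64>> = RefCell::new(vec![10, 11, 12, 13, 14]);
}

fn sorted_members() -> Vec<u64> {
    MEMBERS.with(|v| v.borrow().clone())
}

fn add_member(new: u64) {
    MEMBERS.with(|v| {
        let mut members = v.borrow_mut();
        members.push(new);
        members.sort();
    })
}

fn contains(who: u64) -> bool {
    sorted_members().binary_search(&who).is_ok()
}

fn main() {
    assert!(contains(10) && !contains(9));
    add_member(9);
    assert_eq!(sorted_members(), vec![9, 10, 11, 12, 13, 14]);
}
```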
+ assert_noop!( + Treasury::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), + Error::::AlreadyKnown + ); + + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Treasury::tip(Origin::signed(9), h.clone(), 10), BadOrigin); + System::set_block_number(2); + assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 102); + assert_eq!(Balances::free_balance(3), 8); + }); +} + +#[test] +fn report_awesome_from_beneficiary_and_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u64)); + assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 110); + }); +} + +#[test] +fn close_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); + + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::Premature); + + System::set_block_number(2); + assert_noop!(Treasury::close_tip(Origin::NONE, h.into()), BadOrigin); + assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + + assert_noop!(Treasury::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); + }); +} + +#[test] +fn retract_tip_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Treasury::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); + assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone())); + System::set_block_number(2); + assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); + }); +} + +#[test] +fn tip_median_calculation_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000000)); + System::set_block_number(2); + assert_ok!(Treasury::close_tip(Origin::signed(0), 
h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); +} + +#[test] +fn tip_changing_works() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10000)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10000)); + assert_ok!(Treasury::tip(Origin::signed(13), h.clone(), 0)); + assert_ok!(Treasury::tip(Origin::signed(14), h.clone(), 0)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000)); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 100)); + assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); +} + +#[test] +fn minting_works() { + new_test_ext().execute_with(|| { + // Check that accumulate works when we have Some value in Dummy already. + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + }); +} + +#[test] +fn spend_proposal_takes_min_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_eq!(Balances::free_balance(0), 99); + assert_eq!(Balances::reserved_balance(0), 1); + }); +} + +#[test] +fn spend_proposal_takes_proportional_deposit() { + new_test_ext().execute_with(|| { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 5); + }); +} + +#[test] +fn spend_proposal_fails_when_proposer_poor() { + new_test_ext().execute_with(|| { + assert_noop!( + Treasury::propose_spend(Origin::signed(2), 100, 3), + Error::::InsufficientProposersBalance, + ); + }); +} + +#[test] +fn accepted_spend_proposal_ignored_outside_spend_period() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + + >::on_initialize(1); + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Treasury::pot(), 100); + }); +} + +#[test] +fn unused_pot_should_diminish() { + new_test_ext().execute_with(|| { + let init_total_issuance = Balances::total_issuance(); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Balances::total_issuance(), init_total_issuance + 100); + + >::on_initialize(2); + assert_eq!(Treasury::pot(), 50); + assert_eq!(Balances::total_issuance(), init_total_issuance + 50); + }); +} + +#[test] +fn rejected_spend_proposal_ignored_on_spend_period() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); + + >::on_initialize(2); + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Treasury::pot(), 50); + }); +} + +#[test] +fn reject_already_rejected_spend_proposal_fails() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); + assert_noop!(Treasury::reject_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); + }); +} + +#[test] +fn 
reject_non_existent_spend_proposal_fails() { + new_test_ext().execute_with(|| { + assert_noop!(Treasury::reject_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); + }); +} + +#[test] +fn accept_non_existent_spend_proposal_fails() { + new_test_ext().execute_with(|| { + assert_noop!(Treasury::approve_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); + }); +} + +#[test] +fn accept_already_rejected_spend_proposal_fails() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); + assert_noop!(Treasury::approve_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); + }); +} + +#[test] +fn accepted_spend_proposal_enacted_on_spend_period() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + + >::on_initialize(2); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Treasury::pot(), 0); + }); +} + +#[test] +fn pot_underflow_should_not_diminish() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + + >::on_initialize(2); + assert_eq!(Treasury::pot(), 100); // Pot hasn't changed + + let _ = Balances::deposit_into_existing(&Treasury::account_id(), 100).unwrap(); + >::on_initialize(4); + assert_eq!(Balances::free_balance(3), 150); // Fund has been spent + assert_eq!(Treasury::pot(), 25); // Pot has finally changed + }); +} + +// Treasury account doesn't get deleted if amount approved to spend is all its free balance. +// i.e. pot should not include existential deposit needed for account survival. +#[test] +fn treasury_account_doesnt_get_deleted() { + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + let treasury_balance = Balances::free_balance(&Treasury::account_id()); + + assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + + >::on_initialize(2); + assert_eq!(Treasury::pot(), 100); // Pot hasn't changed + + assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 1)); + + >::on_initialize(4); + assert_eq!(Treasury::pot(), 0); // Pot is emptied + assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there + }); +} + +// In case treasury account is not existing then it works fine. +// This is useful for chain that will just update runtime. 
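The relocated spend-period tests above pin down how the spend step treats the pot: approvals are paid out while they fit, and the 50% `Burn` applies only to an idle surplus, which is why `pot_underflow_should_not_diminish` sees the pot stay at exactly 100 rather than being halved. The sketch below reproduces that behaviour with the mock's numbers; `missed_any` is my own name for the "could not afford an approval" flag, inferred from the test outcomes rather than copied from the pallet's code, which is not part of this diff.

```rust
/// Standalone sketch of the spend-period logic exercised by the tests above.
/// Returns (new_pot, paid_out, burned).
fn spend_funds_sketch(mut pot: u64, approvals: &[u64], burn_percent: u64) -> (u64, u64, u64) {
    let mut paid = 0;
    let mut missed_any = false;
    for &value in approvals {
        if value <= pot {
            pot -= value;
            paid += value;
        } else {
            // Can't afford this proposal yet; keep it and skip the burn below.
            missed_any = true;
        }
    }
    // Only an idle surplus is burned: if every approval was honoured,
    // a percentage of whatever is left (50% in the mock) is destroyed.
    let burned = if missed_any { 0 } else { pot * burn_percent / 100 };
    pot -= burned;
    (pot, paid, burned)
}

fn main() {
    // `unused_pot_should_diminish`: no approvals, pot 100 -> 50 burned.
    assert_eq!(spend_funds_sketch(100, &[], 50), (50, 0, 50));

    // `accepted_spend_proposal_enacted_on_spend_period`: pot 100, proposal 100.
    assert_eq!(spend_funds_sketch(100, &[100], 50), (0, 100, 0));

    // `pot_underflow_should_not_diminish`: a 150 proposal can't be afforded,
    // so the pot stays at 100 and nothing is burned...
    assert_eq!(spend_funds_sketch(100, &[150], 50), (100, 0, 0));
    // ...and once the pot is topped up to 200, the proposal is paid and half
    // of the remaining 50 is burned, leaving 25.
    assert_eq!(spend_funds_sketch(200, &[150], 50), (25, 150, 25));
}
```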
+#[test] +fn inexistent_account_works() { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig::{ + balances: vec![(0, 100), (1, 99), (2, 1)], + }.assimilate_storage(&mut t).unwrap(); + // Treasury genesis config is not build thus treasury account does not exist + let mut t: sp_io::TestExternalities = t.into(); + + t.execute_with(|| { + assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist + assert_eq!(Treasury::pot(), 0); // Pot is empty + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 1)); + >::on_initialize(2); + assert_eq!(Treasury::pot(), 0); // Pot hasn't changed + assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed + + Balances::make_free_balance_be(&Treasury::account_id(), 100); + assert_eq!(Treasury::pot(), 99); // Pot now contains funds + assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist + + >::on_initialize(4); + + assert_eq!(Treasury::pot(), 0); // Pot has changed + assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed + }); +} diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 46f59a191ae9f9122b209ce15b20825d324f248f..cf1042d8521da3e0c8c538eaef51c48de03b5439 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-utility" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,17 +10,19 @@ description = "FRAME utilities pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } + +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } 
+pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } [features] default = ["std"] @@ -33,3 +35,10 @@ std = [ "sp-io/std", "sp-std/std" ] +runtime-benchmarks = [ + "frame-benchmarking", + "frame-support/runtime-benchmarks", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..fc8783b49aa87aaf0813bcc467defae82abc7bf3 --- /dev/null +++ b/frame/utility/src/benchmarking.rs @@ -0,0 +1,169 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +// Benchmarks for Utility Pallet + +#![cfg(feature = "runtime-benchmarks")] + +use super::*; +use frame_system::RawOrigin; +use frame_benchmarking::{benchmarks, account}; +use sp_runtime::traits::Saturating; + +use crate::Module as Utility; + +const SEED: u32 = 0; + +fn setup_multi(s: u32, z: u32) -> Result<(Vec, Box<::Call>), &'static str>{ + let mut signatories: Vec = Vec::new(); + for i in 0 .. s { + let signatory = account("signatory", i, SEED); + // Give them some balance for a possible deposit + let deposit = T::MultisigDepositBase::get() + T::MultisigDepositFactor::get() * s.into(); + let balance = T::Currency::minimum_balance().saturating_mul(100.into()) + deposit; + T::Currency::make_free_balance_be(&signatory, balance); + signatories.push(signatory); + } + signatories.sort(); + let call: Box<::Call> = Box::new(frame_system::Call::remark(vec![0; z as usize]).into()); + return Ok((signatories, call)) +} + +benchmarks! { + _ { } + + batch { + let c in 0 .. 1000; + let mut calls: Vec<::Call> = Vec::new(); + for i in 0 .. c { + let call = frame_system::Call::remark(vec![]).into(); + calls.push(call); + } + let caller = account("caller", 0, SEED); + }: _(RawOrigin::Signed(caller), calls) + + as_sub { + let u in 0 .. 1000; + let caller = account("caller", u, SEED); + let call = Box::new(frame_system::Call::remark(vec![]).into()); + }: _(RawOrigin::Signed(caller), u as u16, call) + + as_multi_create { + // Signatories, need at least 2 total people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call) + + as_multi_approve { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 
10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + // before the call, get the timepoint + let timepoint = Utility::::timepoint(); + // Create the multi + Utility::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; + let caller2 = signatories2.remove(0); + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call) + + as_multi_complete { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + // before the call, get the timepoint + let timepoint = Utility::::timepoint(); + // Create the multi + Utility::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; + // Everyone except the first person approves + for i in 1 .. s - 1 { + let mut signatories_loop = signatories2.clone(); + let caller_loop = signatories_loop.remove(i as usize); + Utility::::as_multi(RawOrigin::Signed(caller_loop).into(), s as u16, signatories_loop, Some(timepoint), call.clone())?; + } + let caller2 = signatories2.remove(0); + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call) + + approve_as_multi_create { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + let call_hash = call.using_encoded(blake2_256); + // Create the multi + }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash) + + approve_as_multi_approve { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + let call_hash = call.using_encoded(blake2_256); + // before the call, get the timepoint + let timepoint = Utility::::timepoint(); + // Create the multi + Utility::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; + let caller2 = signatories2.remove(0); + }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash) + + cancel_as_multi { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 
10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + let call_hash = call.using_encoded(blake2_256); + let timepoint = Utility::::timepoint(); + // Create the multi + Utility::::as_multi(RawOrigin::Signed(caller.clone()).into(), s as u16, signatories.clone(), None, call.clone())?; + }: _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_batch::()); + assert_ok!(test_benchmark_as_sub::()); + assert_ok!(test_benchmark_as_multi_create::()); + assert_ok!(test_benchmark_as_multi_approve::()); + assert_ok!(test_benchmark_as_multi_complete::()); + assert_ok!(test_benchmark_approve_as_multi_create::()); + assert_ok!(test_benchmark_approve_as_multi_approve::()); + assert_ok!(test_benchmark_cancel_as_multi::()); + }); + } +} diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 0b60532c3dd23c35d09ff09717eb79d3217ef212..3cd6d103cc0940e4ab75d037c8f703ef8a0560d5 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -68,10 +68,14 @@ use sp_io::hashing::blake2_256; use frame_support::{decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, RuntimeDebug}; use frame_support::{traits::{Get, ReservableCurrency, Currency}, weights::{GetDispatchInfo, DispatchClass,FunctionOf}, + dispatch::PostDispatchInfo, }; use frame_system::{self as system, ensure_signed}; use sp_runtime::{DispatchError, DispatchResult, traits::Dispatchable}; +mod tests; +mod benchmarking; + type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; /// Configuration trait. @@ -80,7 +84,7 @@ pub trait Trait: frame_system::Trait { type Event: From> + Into<::Event>; /// The overarching call type. - type Call: Parameter + Dispatchable + GetDispatchInfo; + type Call: Parameter + Dispatchable + GetDispatchInfo + From>; /// The currency mechanism. type Currency: ReservableCurrency; @@ -166,7 +170,8 @@ decl_event! { /// Events type. pub enum Event where AccountId = ::AccountId, - BlockNumber = ::BlockNumber + BlockNumber = ::BlockNumber, + CallHash = [u8; 32] { /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as /// well as the error. @@ -174,17 +179,17 @@ decl_event! { /// Batch of dispatches completed fully with no error. BatchCompleted, /// A new multisig operation has begun. First param is the account that is approving, - /// second is the multisig account. - NewMultisig(AccountId, AccountId), + /// second is the multisig account, third is hash of the call. + NewMultisig(AccountId, AccountId, CallHash), /// A multisig operation has been approved by someone. First param is the account that is - /// approving, third is the multisig account. - MultisigApproval(AccountId, Timepoint, AccountId), + /// approving, third is the multisig account, fourth is hash of the call. + MultisigApproval(AccountId, Timepoint, AccountId, CallHash), /// A multisig operation has been executed. First param is the account that is - /// approving, third is the multisig account. - MultisigExecuted(AccountId, Timepoint, AccountId, DispatchResult), + /// approving, third is the multisig account, fourth is hash of the call to be executed. 
+ MultisigExecuted(AccountId, Timepoint, AccountId, CallHash, DispatchResult), /// A multisig operation has been cancelled. First param is the account that is - /// cancelling, third is the multisig account. - MultisigCancelled(AccountId, Timepoint, AccountId), + /// cancelling, third is the multisig account, fourth is hash of the call. + MultisigCancelled(AccountId, Timepoint, AccountId, CallHash), } } @@ -243,7 +248,7 @@ decl_module! { for (index, call) in calls.into_iter().enumerate() { let result = call.dispatch(origin.clone()); if let Err(e) = result { - Self::deposit_event(Event::::BatchInterrupted(index as u32, e)); + Self::deposit_event(Event::::BatchInterrupted(index as u32, e.error)); return Ok(()); } } @@ -266,6 +271,7 @@ decl_module! { let who = ensure_signed(origin)?; let pseudonym = Self::sub_account_id(who, index); call.dispatch(frame_system::RawOrigin::Signed(pseudonym).into()) + .map(|_| ()).map_err(|e| e.error) } /// Register approval for a dispatch to be made from a deterministic composite account if @@ -342,7 +348,7 @@ decl_module! { if (m.approvals.len() as u16) < threshold - 1 { m.approvals.insert(pos, who.clone()); >::insert(&id, call_hash, m); - Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id)); + Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); return Ok(()) } } else { @@ -354,7 +360,9 @@ decl_module! { let result = call.dispatch(frame_system::RawOrigin::Signed(id.clone()).into()); let _ = T::Currency::unreserve(&m.depositor, m.deposit); >::remove(&id, call_hash); - Self::deposit_event(RawEvent::MultisigExecuted(who, timepoint, id, result)); + Self::deposit_event(RawEvent::MultisigExecuted( + who, timepoint, id, call_hash, result.map(|_| ()).map_err(|e| e.error) + )); } else { ensure!(maybe_timepoint.is_none(), Error::::UnexpectedTimepoint); if threshold > 1 { @@ -367,9 +375,10 @@ decl_module! { depositor: who.clone(), approvals: vec![who.clone()], }); - Self::deposit_event(RawEvent::NewMultisig(who, id)); + Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); } else { return call.dispatch(frame_system::RawOrigin::Signed(id).into()) + .map(|_| ()).map_err(|e| e.error) } } Ok(()) @@ -436,7 +445,7 @@ decl_module! { if let Err(pos) = m.approvals.binary_search(&who) { m.approvals.insert(pos, who.clone()); >::insert(&id, call_hash, m); - Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id)); + Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); } else { Err(Error::::AlreadyApproved)? } @@ -452,7 +461,7 @@ decl_module! { depositor: who.clone(), approvals: vec![who.clone()], }); - Self::deposit_event(RawEvent::NewMultisig(who, id)); + Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); } else { Err(Error::::NoApprovalsNeeded)? } @@ -512,7 +521,7 @@ decl_module! { let _ = T::Currency::unreserve(&m.depositor, m.deposit); >::remove(&id, call_hash); - Self::deposit_event(RawEvent::MultisigCancelled(who, timepoint, id)); + Self::deposit_event(RawEvent::MultisigCancelled(who, timepoint, id, call_hash)); Ok(()) } } @@ -563,426 +572,3 @@ impl Module { Ok(signatories) } } - -#[cfg(test)] -mod tests { - use super::*; - - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::Weight, impl_outer_event - }; - use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; - use crate as utility; - - impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} - } - - impl_outer_event! { - pub enum TestEvent for Test { - system, - pallet_balances, - utility, - } - } - impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - pallet_balances::Balances, - utility::Utility, - } - } - - // For testing the pallet, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = Call; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = TestEvent; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = TestEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - parameter_types! { - pub const MultisigDepositBase: u64 = 1; - pub const MultisigDepositFactor: u64 = 1; - pub const MaxSignatories: u16 = 3; - } - impl Trait for Test { - type Event = TestEvent; - type Call = Call; - type Currency = Balances; - type MultisigDepositBase = MultisigDepositBase; - type MultisigDepositFactor = MultisigDepositFactor; - type MaxSignatories = MaxSignatories; - } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Utility = Module; - - use pallet_balances::Call as BalancesCall; - use pallet_balances::Error as BalancesError; - - fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 10)], - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - fn last_event() -> TestEvent { - system::Module::::events().pop().map(|e| e.event).expect("Event expected") - } - - fn expect_event>(e: E) { - assert_eq!(last_event(), e.into()); - } - - fn now() -> Timepoint { - Utility::timepoint() - } - - #[test] - fn multisig_deposit_is_taken_and_returned() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_eq!(Balances::free_balance(1), 2); - assert_eq!(Balances::reserved_balance(1), 3); - - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); - assert_eq!(Balances::free_balance(1), 5); - 
assert_eq!(Balances::reserved_balance(1), 0); - }); - } - - #[test] - fn cancel_multisig_returns_deposit() { - new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); - assert_ok!(Utility::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); - assert_eq!(Balances::free_balance(1), 6); - assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!( - Utility::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::reserved_balance(1), 0); - }); - } - - #[test] - fn timepoint_checking_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - - assert_noop!( - Utility::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone()), - Error::::UnexpectedTimepoint, - ); - - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash)); - - assert_noop!( - Utility::as_multi(Origin::signed(2), 2, vec![1, 3], None, call.clone()), - Error::::NoTimepoint, - ); - let later = Timepoint { index: 1, .. now() }; - assert_noop!( - Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(later), call.clone()), - Error::::WrongTimepoint, - ); - }); - } - - #[test] - fn multisig_2_of_3_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash)); - assert_eq!(Balances::free_balance(6), 0); - - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); - assert_eq!(Balances::free_balance(6), 15); - }); - } - - #[test] - fn multisig_3_of_3_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); - assert_ok!(Utility::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); - assert_eq!(Balances::free_balance(6), 0); - - assert_ok!(Utility::as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), call)); - assert_eq!(Balances::free_balance(6), 15); - }); - } - - #[test] - fn cancel_multisig_works() { - new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 3, vec![2, 
3], None, hash.clone())); - assert_ok!(Utility::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); - assert_noop!( - Utility::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), - Error::::NotOwner, - ); - assert_ok!( - Utility::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); - }); - } - - #[test] - fn multisig_2_of_3_as_multi_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_eq!(Balances::free_balance(6), 0); - - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); - assert_eq!(Balances::free_balance(6), 15); - }); - } - - #[test] - fn multisig_2_of_3_as_multi_with_many_calls_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call1 = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); - let call2 = Box::new(Call::Balances(BalancesCall::transfer(7, 5))); - - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call1.clone())); - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], None, call2.clone())); - assert_ok!(Utility::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call2)); - assert_ok!(Utility::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call1)); - - assert_eq!(Balances::free_balance(6), 10); - assert_eq!(Balances::free_balance(7), 5); - }); - } - - #[test] - fn multisig_2_of_3_cannot_reissue_same_call() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call.clone())); - assert_eq!(Balances::free_balance(multi), 5); - - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_ok!(Utility::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call)); - - let err = DispatchError::from(BalancesError::::InsufficientBalance).stripped(); - expect_event(RawEvent::MultisigExecuted(3, now(), multi, Err(err))); - }); - } - - #[test] - fn zero_threshold_fails() { - new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_noop!( - Utility::as_multi(Origin::signed(1), 0, vec![2], None, call), - Error::::ZeroThreshold, - ); - }); - } - - #[test] - fn too_many_signatories_fails() { - new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_noop!( - Utility::as_multi(Origin::signed(1), 2, vec![2, 3, 4], None, call.clone()), - Error::::TooManySignatories, - ); - }); - } - - #[test] 
- fn duplicate_approvals_are_ignored() { - new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash.clone())); - assert_noop!( - Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash.clone()), - Error::::AlreadyApproved, - ); - assert_ok!(Utility::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone())); - assert_noop!( - Utility::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash.clone()), - Error::::NoApprovalsNeeded, - ); - }); - } - - #[test] - fn multisig_1_of_3_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 1); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_noop!( - Utility::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone()), - Error::::NoApprovalsNeeded, - ); - assert_noop!( - Utility::as_multi(Origin::signed(4), 1, vec![2, 3], None, call.clone()), - BalancesError::::InsufficientBalance, - ); - assert_ok!(Utility::as_multi(Origin::signed(1), 1, vec![2, 3], None, call)); - - assert_eq!(Balances::free_balance(6), 15); - }); - } - - #[test] - fn as_sub_works() { - new_test_ext().execute_with(|| { - let sub_1_0 = Utility::sub_account_id(1, 0); - assert_ok!(Balances::transfer(Origin::signed(1), sub_1_0, 5)); - assert_noop!(Utility::as_sub( - Origin::signed(1), - 1, - Box::new(Call::Balances(BalancesCall::transfer(6, 3))), - ), BalancesError::::InsufficientBalance); - assert_ok!(Utility::as_sub( - Origin::signed(1), - 0, - Box::new(Call::Balances(BalancesCall::transfer(2, 3))), - )); - assert_eq!(Balances::free_balance(sub_1_0), 2); - assert_eq!(Balances::free_balance(2), 13); - }); - } - - #[test] - fn batch_with_root_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 10); - assert_ok!(Utility::batch(Origin::ROOT, vec![ - Call::Balances(BalancesCall::force_transfer(1, 2, 5)), - Call::Balances(BalancesCall::force_transfer(1, 2, 5)) - ])); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::free_balance(2), 20); - }); - } - - #[test] - fn batch_with_signed_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 10); - assert_ok!( - Utility::batch(Origin::signed(1), vec![ - Call::Balances(BalancesCall::transfer(2, 5)), - Call::Balances(BalancesCall::transfer(2, 5)) - ]), - ); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::free_balance(2), 20); - }); - } - - #[test] - fn batch_early_exit_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 10); - assert_ok!( - Utility::batch(Origin::signed(1), vec![ - Call::Balances(BalancesCall::transfer(2, 5)), - Call::Balances(BalancesCall::transfer(2, 10)), - Call::Balances(BalancesCall::transfer(2, 5)), - ]), - ); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::free_balance(2), 15); - }); - } -} diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs new file mode 100644 index 
0000000000000000000000000000000000000000..68bdabd6d9b7fe418bad496305a603f61551fbf7 --- /dev/null +++ b/frame/utility/src/tests.rs @@ -0,0 +1,442 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +// Tests for Utility Pallet + +#![cfg(test)] + +use super::*; + +use frame_support::{ + assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, + weights::Weight, impl_outer_event +}; +use sp_core::H256; +use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use crate as utility; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +impl_outer_event! { + pub enum TestEvent for Test { + system, + pallet_balances, + utility, + } +} +impl_outer_dispatch! { + pub enum Call for Test where origin: Origin { + frame_system::System, + pallet_balances::Balances, + utility::Utility, + } +} + +// For testing the pallet, we construct most of a mock runtime. This means +// first constructing a configuration type (`Test`) which `impl`s each of the +// configuration traits of pallets we want to use. +#[derive(Clone, Eq, PartialEq)] +pub struct Test; +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = TestEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); +} +parameter_types! { + pub const ExistentialDeposit: u64 = 1; +} +impl pallet_balances::Trait for Test { + type Balance = u64; + type Event = TestEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; +} +parameter_types! 
{ + pub const MultisigDepositBase: u64 = 1; + pub const MultisigDepositFactor: u64 = 1; + pub const MaxSignatories: u16 = 3; +} +impl Trait for Test { + type Event = TestEvent; + type Call = Call; + type Currency = Balances; + type MultisigDepositBase = MultisigDepositBase; + type MultisigDepositFactor = MultisigDepositFactor; + type MaxSignatories = MaxSignatories; +} +type System = frame_system::Module; +type Balances = pallet_balances::Module; +type Utility = Module; + +use pallet_balances::Call as BalancesCall; +use pallet_balances::Error as BalancesError; + +pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 10)], + }.assimilate_storage(&mut t).unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext +} + +fn last_event() -> TestEvent { + system::Module::::events().pop().map(|e| e.event).expect("Event expected") +} + +fn expect_event>(e: E) { + assert_eq!(last_event(), e.into()); +} + +fn now() -> Timepoint { + Utility::timepoint() +} + +#[test] +fn multisig_deposit_is_taken_and_returned() { + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); + assert_eq!(Balances::free_balance(1), 2); + assert_eq!(Balances::reserved_balance(1), 3); + + assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn cancel_multisig_returns_deposit() { + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_ok!(Utility::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); + assert_ok!(Utility::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); + assert_eq!(Balances::free_balance(1), 6); + assert_eq!(Balances::reserved_balance(1), 4); + assert_ok!( + Utility::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), + ); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::reserved_balance(1), 0); + }); +} + +#[test] +fn timepoint_checking_works() { + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + + assert_noop!( + Utility::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone()), + Error::::UnexpectedTimepoint, + ); + + assert_ok!(Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash)); + + assert_noop!( + Utility::as_multi(Origin::signed(2), 2, vec![1, 3], None, call.clone()), + Error::::NoTimepoint, + ); + let later = Timepoint { index: 1, .. 
now() }; + assert_noop!( + Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(later), call.clone()), + Error::::WrongTimepoint, + ); + }); +} + +#[test] +fn multisig_2_of_3_works() { + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_ok!(Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash)); + assert_eq!(Balances::free_balance(6), 0); + + assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); + assert_eq!(Balances::free_balance(6), 15); + }); +} + +#[test] +fn multisig_3_of_3_works() { + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 3); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_ok!(Utility::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); + assert_ok!(Utility::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); + assert_eq!(Balances::free_balance(6), 0); + + assert_ok!(Utility::as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), call)); + assert_eq!(Balances::free_balance(6), 15); + }); +} + +#[test] +fn cancel_multisig_works() { + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_ok!(Utility::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); + assert_ok!(Utility::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); + assert_noop!( + Utility::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), + Error::::NotOwner, + ); + assert_ok!( + Utility::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), + ); + }); +} + +#[test] +fn multisig_2_of_3_as_multi_works() { + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); + assert_eq!(Balances::free_balance(6), 0); + + assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); + assert_eq!(Balances::free_balance(6), 15); + }); +} + +#[test] +fn multisig_2_of_3_as_multi_with_many_calls_works() { + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call1 = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); + let call2 = Box::new(Call::Balances(BalancesCall::transfer(7, 5))); + + assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], 
None, call1.clone())); + assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], None, call2.clone())); + assert_ok!(Utility::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call2)); + assert_ok!(Utility::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call1)); + + assert_eq!(Balances::free_balance(6), 10); + assert_eq!(Balances::free_balance(7), 5); + }); +} + +#[test] +fn multisig_2_of_3_cannot_reissue_same_call() { + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); + assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); + assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call.clone())); + assert_eq!(Balances::free_balance(multi), 5); + + assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); + assert_ok!(Utility::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call.clone())); + + let err = DispatchError::from(BalancesError::::InsufficientBalance).stripped(); + expect_event(RawEvent::MultisigExecuted(3, now(), multi, call.using_encoded(blake2_256), Err(err))); + }); +} + +#[test] +fn zero_threshold_fails() { + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + assert_noop!( + Utility::as_multi(Origin::signed(1), 0, vec![2], None, call), + Error::::ZeroThreshold, + ); + }); +} + +#[test] +fn too_many_signatories_fails() { + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + assert_noop!( + Utility::as_multi(Origin::signed(1), 2, vec![2, 3, 4], None, call.clone()), + Error::::TooManySignatories, + ); + }); +} + +#[test] +fn duplicate_approvals_are_ignored() { + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_ok!(Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash.clone())); + assert_noop!( + Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash.clone()), + Error::::AlreadyApproved, + ); + assert_ok!(Utility::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone())); + assert_noop!( + Utility::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash.clone()), + Error::::NoApprovalsNeeded, + ); + }); +} + +#[test] +fn multisig_1_of_3_works() { + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 1); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_noop!( + Utility::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone()), + Error::::NoApprovalsNeeded, + ); + assert_noop!( + Utility::as_multi(Origin::signed(4), 1, vec![2, 3], None, call.clone()), + BalancesError::::InsufficientBalance, + ); + assert_ok!(Utility::as_multi(Origin::signed(1), 1, vec![2, 3], None, call)); + + assert_eq!(Balances::free_balance(6), 15); + }); +} + +#[test] +fn as_sub_works() { + 
new_test_ext().execute_with(|| { + let sub_1_0 = Utility::sub_account_id(1, 0); + assert_ok!(Balances::transfer(Origin::signed(1), sub_1_0, 5)); + assert_noop!(Utility::as_sub( + Origin::signed(1), + 1, + Box::new(Call::Balances(BalancesCall::transfer(6, 3))), + ), BalancesError::::InsufficientBalance); + assert_ok!(Utility::as_sub( + Origin::signed(1), + 0, + Box::new(Call::Balances(BalancesCall::transfer(2, 3))), + )); + assert_eq!(Balances::free_balance(sub_1_0), 2); + assert_eq!(Balances::free_balance(2), 13); + }); +} + +#[test] +fn batch_with_root_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::batch(Origin::ROOT, vec![ + Call::Balances(BalancesCall::force_transfer(1, 2, 5)), + Call::Balances(BalancesCall::force_transfer(1, 2, 5)) + ])); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 20); + }); +} + +#[test] +fn batch_with_signed_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!( + Utility::batch(Origin::signed(1), vec![ + Call::Balances(BalancesCall::transfer(2, 5)), + Call::Balances(BalancesCall::transfer(2, 5)) + ]), + ); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 20); + }); +} + +#[test] +fn batch_early_exit_works() { + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!( + Utility::batch(Origin::signed(1), vec![ + Call::Balances(BalancesCall::transfer(2, 5)), + Call::Balances(BalancesCall::transfer(2, 10)), + Call::Balances(BalancesCall::transfer(2, 5)), + ]), + ); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::free_balance(2), 15); + }); +} diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 882c062c438355a29b68d63d8124de4a100e2b27..e40062706ff8559101e0390300b2a8af58a4174c 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "pallet-vesting" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,19 +10,19 @@ description = "FRAME pallet for manage vesting" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } -frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../benchmarking", optional = true } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" 
} +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../system" } +frame-benchmarking = { version = "2.0.0-alpha.5", default-features = false, path = "../benchmarking", optional = true } [dev-dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } -sp-storage = { version = "2.0.0-alpha.2", path = "../../primitives/storage" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.5", path = "../balances" } +sp-storage = { version = "2.0.0-alpha.5", path = "../../primitives/storage" } hex-literal = "0.2.1" [features] @@ -36,4 +36,7 @@ std = [ "frame-support/std", "frame-system/std", ] -runtime-benchmarks = ["frame-benchmarking", "frame-system/runtime-benchmarks"] +runtime-benchmarks = ["frame-benchmarking"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index 79ab0cb6e329ee577d094d5567bde037550379ab..be2cb4cb2b0aad0a2fe1578a3f440dc101225811 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -16,6 +16,8 @@ //! Vesting pallet benchmarking. +#![cfg(feature = "runtime-benchmarks")] + use super::*; use frame_system::{RawOrigin, Module as System}; @@ -44,6 +46,7 @@ fn setup(b: u32) -> T::AccountId { let starting_block = 0; let caller = account("caller", 0, SEED); + System::::set_block_number(0.into()); // Add schedule to avoid `NotVesting` error. let _ = Vesting::::add_vesting_schedule( @@ -63,32 +66,44 @@ fn setup(b: u32) -> T::AccountId { benchmarks! { _ { - // Current block. It allows to hit different paths of `update_lock`. - // It doesn't seems to influence the timings which branch is taken. - let b in 0 .. 1 => (); // Number of previous locks. // It doesn't seems to influence the timings for lower values. let l in 0 .. MAX_LOCKS => add_locks::(l); } - vest { - let b in ...; + vest_locked { + let l in ...; + + let caller = setup::(0u32); + + }: vest(RawOrigin::Signed(caller)) + + vest_not_locked { + let l in ...; + + let caller = setup::(1u32); + + }: vest(RawOrigin::Signed(caller)) + + vest_other_locked { let l in ...; - let caller = setup::(b); + let other: T::AccountId = setup::(0u32); + let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); + + let caller = account("caller", 0, SEED); - }: _(RawOrigin::Signed(caller)) + }: vest_other(RawOrigin::Signed(caller), other_lookup) - vest_other { - let b in ...; + vest_other_not_locked { let l in ...; - let other: T::AccountId = setup::(b); + let other: T::AccountId = setup::(1u32); let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); let caller = account("caller", 0, SEED); - }: _(RawOrigin::Signed(caller), other_lookup) + }: vest_other(RawOrigin::Signed(caller), other_lookup) vested_transfer { let u in 0 .. 1000; @@ -106,6 +121,24 @@ benchmarks! 
{ }; let _ = T::Currency::make_free_balance_be(&from, transfer_amount * 10.into()); - + }: _(RawOrigin::Signed(from), to_lookup, vesting_schedule) } + +#[cfg(test)] +mod tests { + use super::*; + use crate::tests::{ExtBuilder, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + ExtBuilder::default().existential_deposit(256).build().execute_with(|| { + assert_ok!(test_benchmark_vest_locked::()); + assert_ok!(test_benchmark_vest_not_locked::()); + assert_ok!(test_benchmark_vest_other_locked::()); + assert_ok!(test_benchmark_vest_other_not_locked::()); + assert_ok!(test_benchmark_vested_transfer::()); + }); + } +} diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index e7d0bd3c185a2b45a8a0cd21c15f358602ea160e..b0c98e78bd6f7243e4c017f133726fccf16407a9 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -54,13 +54,12 @@ use sp_runtime::{DispatchResult, RuntimeDebug, traits::{ }}; use frame_support::{decl_module, decl_event, decl_storage, decl_error, ensure}; use frame_support::traits::{ - Currency, LockableCurrency, VestingSchedule, WithdrawReason, LockIdentifier, ExistenceRequirement, - Get, + Currency, LockableCurrency, VestingSchedule, WithdrawReason, LockIdentifier, + ExistenceRequirement, Get }; use frame_support::weights::SimpleDispatchInfo; use frame_system::{self as system, ensure_signed}; -#[cfg(feature = "runtime-benchmarks")] mod benchmarking; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -118,7 +117,8 @@ decl_storage! { trait Store for Module as Vesting { /// Information regarding the vesting of a given account. pub Vesting get(fn vesting): - map hasher(blake2_256) T::AccountId => Option, T::BlockNumber>>; + map hasher(blake2_128_concat) T::AccountId + => Option, T::BlockNumber>>; } add_extra_genesis { config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, BalanceOf)>; @@ -194,6 +194,7 @@ decl_module! { /// - One storage read (codec `O(1)`) and up to one removal. /// - One event. /// # + #[weight = SimpleDispatchInfo::default()] fn vest(origin) -> DispatchResult { let who = ensure_signed(origin)?; Self::update_lock(who) @@ -215,6 +216,7 @@ decl_module! { /// - One storage read (codec `O(1)`) and up to one removal. /// - One event. /// # + #[weight = SimpleDispatchInfo::default()] fn vest_other(origin, target: ::Source) -> DispatchResult { ensure_signed(origin)?; Self::update_lock(T::Lookup::lookup(target)?) 
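Aside on the storage change in the hunk above: moving the `Vesting` map from `hasher(blake2_256)` to `hasher(blake2_128_concat)` appends the SCALE-encoded key after its 16-byte hash, so the plain key can be recovered when iterating storage. A minimal sketch of that key layout follows, assuming the usual twox-128 pallet/item prefixes and `u64` account IDs as in the test runtime; the prefix strings and the helper name are illustrative assumptions, not taken from the diff.

// Sketch only: how one entry of a `blake2_128_concat` map is keyed.
use codec::Encode; // parity-scale-codec, renamed `codec` as in the workspace
use sp_core::hashing::{blake2_128, twox_128};

fn vesting_storage_key(account: &u64) -> Vec<u8> {
    let mut key = Vec::new();
    key.extend_from_slice(&twox_128(b"Vesting")); // pallet prefix (assumed)
    key.extend_from_slice(&twox_128(b"Vesting")); // storage item prefix (assumed)
    let encoded = account.encode();
    key.extend_from_slice(&blake2_128(&encoded)); // 16-byte hash of the encoded key
    key.extend_from_slice(&encoded);              // concatenated plain key, recoverable on iteration
    key
}

With plain `blake2_256` the trailing plain-key part is absent, which is why that hasher cannot support transparent key iteration.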
@@ -449,7 +451,9 @@ mod tests { (12, 10, 20, 5 * self.existential_deposit) ], }.assimilate_storage(&mut t).unwrap(); - t.into() + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext } } @@ -459,7 +463,6 @@ mod tests { .existential_deposit(256) .build() .execute_with(|| { - assert_eq!(System::block_number(), 1); let user1_free_balance = Balances::free_balance(&1); let user2_free_balance = Balances::free_balance(&2); let user12_free_balance = Balances::free_balance(&12); @@ -518,7 +521,6 @@ mod tests { .existential_deposit(10) .build() .execute_with(|| { - assert_eq!(System::block_number(), 1); let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance // Account 1 has only 5 units vested at block 1 (plus 50 unvested) @@ -536,7 +538,6 @@ mod tests { .existential_deposit(10) .build() .execute_with(|| { - assert_eq!(System::block_number(), 1); let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance // Account 1 has only 5 units vested at block 1 (plus 50 unvested) @@ -552,7 +553,6 @@ mod tests { .existential_deposit(10) .build() .execute_with(|| { - assert_eq!(System::block_number(), 1); let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance // Account 1 has only 5 units vested at block 1 (plus 50 unvested) @@ -568,7 +568,6 @@ mod tests { .existential_deposit(10) .build() .execute_with(|| { - assert_eq!(System::block_number(), 1); assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); @@ -596,7 +595,6 @@ mod tests { .existential_deposit(256) .build() .execute_with(|| { - assert_eq!(System::block_number(), 1); let user12_free_balance = Balances::free_balance(&12); assert_eq!(user12_free_balance, 2560); // Account 12 has free balance @@ -622,7 +620,6 @@ mod tests { .existential_deposit(256) .build() .execute_with(|| { - assert_eq!(System::block_number(), 1); let user3_free_balance = Balances::free_balance(&3); let user4_free_balance = Balances::free_balance(&4); assert_eq!(user3_free_balance, 256 * 30); @@ -666,7 +663,6 @@ mod tests { .existential_deposit(256) .build() .execute_with(|| { - assert_eq!(System::block_number(), 1); let user2_free_balance = Balances::free_balance(&2); let user4_free_balance = Balances::free_balance(&4); assert_eq!(user2_free_balance, 256 * 20); diff --git a/primitives/allocator/Cargo.toml b/primitives/allocator/Cargo.toml index 944cfa6de19d57c8f9a010b5b1ab831f69adf31f..635a3c9128febcd6fd46690ea3e91043d935e70f 100644 --- a/primitives/allocator/Cargo.toml +++ b/primitives/allocator/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-allocator" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,9 +10,9 @@ description = "Collection of allocator implementations." 
documentation = "https://docs.rs/sp-allocator" [dependencies] -sp-std = { version = "2.0.0-alpha.2", path = "../std", default-features = false } -sp-core = { version = "2.0.0-alpha.2", path = "../core", default-features = false } -sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../wasm-interface", default-features = false } +sp-std = { version = "2.0.0-alpha.5", path = "../std", default-features = false } +sp-core = { version = "2.0.0-alpha.5", path = "../core", default-features = false } +sp-wasm-interface = { version = "2.0.0-alpha.5", path = "../wasm-interface", default-features = false } log = { version = "0.4.8", optional = true } derive_more = { version = "0.99.2", optional = true } @@ -25,3 +25,6 @@ std = [ "log", "derive_more", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index a33ff0fa0d75453077cec6650c8c62af54022e91..fedeeceb3faadfb8e3ec6ff19b7204aa97e6904b 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,13 +9,13 @@ repository = "https://github.com/paritytech/substrate/" description = "Substrate runtime api primitives" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -sp-api-proc-macro = { version = "2.0.0-alpha.2", path = "proc-macro" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } -sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../version" } -sp-state-machine = { version = "0.8.0-alpha.2", optional = true, path = "../../primitives/state-machine" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +sp-api-proc-macro = { version = "2.0.0-alpha.5", path = "proc-macro" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } +sp-version = { version = "2.0.0-alpha.5", default-features = false, path = "../version" } +sp-state-machine = { version = "0.8.0-alpha.5", optional = true, path = "../../primitives/state-machine" } hash-db = { version = "0.15.2", optional = true } [dev-dependencies] @@ -32,3 +32,6 @@ std = [ "sp-version/std", "hash-db", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 940e21759642fdd12fc6339a34a2e67410f956ac..25c5ae13435a5a107cee146acc2a0661afea8c9f 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-api-proc-macro" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -24,3 +24,6 @@ proc-macro-crate = "0.1.4" [features] default = [ "std" ] std = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index 
459707bdb53f4425153d7dc1bc631566379fc092..e9f3087912e26f322214fa22cdbca397a4a2c332 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -19,7 +19,7 @@ use crate::utils::{ fold_fn_decl_for_client_side, extract_parameter_names_types_and_borrows, generate_native_call_generator_fn_name, return_type_extract_type, generate_method_runtime_api_impl_name, generate_call_api_at_fn_name, prefix_function_with_trait, - replace_wild_card_parameter_names, + replace_wild_card_parameter_names, AllowSelfRefInParameters, }; use proc_macro2::{TokenStream, Span}; @@ -198,7 +198,7 @@ fn generate_native_call_generators(decl: &ItemTrait) -> Result { // Generate a native call generator for each function of the given trait. for fn_ in fns { - let params = extract_parameter_names_types_and_borrows(&fn_)?; + let params = extract_parameter_names_types_and_borrows(&fn_, AllowSelfRefInParameters::No)?; let trait_fn_name = &fn_.ident; let fn_name = generate_native_call_generator_fn_name(&fn_.ident); let output = return_type_replace_block_with_node_block(fn_.output.clone()); @@ -592,7 +592,10 @@ impl<'a> ToClientSideDecl<'a> { // Get types and if the value is borrowed from all parameters. // If there is an error, we push it as the block to the user. - let param_types = match extract_parameter_names_types_and_borrows(fn_sig) { + let param_types = match extract_parameter_names_types_and_borrows( + fn_sig, + AllowSelfRefInParameters::No, + ) { Ok(res) => res.into_iter().map(|v| { let ty = v.1; let borrow = v.2; @@ -629,7 +632,10 @@ impl<'a> ToClientSideDecl<'a> { mut method: TraitItemMethod, context: TokenStream, ) -> TraitItemMethod { - let params = match extract_parameter_names_types_and_borrows(&method.sig) { + let params = match extract_parameter_names_types_and_borrows( + &method.sig, + AllowSelfRefInParameters::No, + ) { Ok(res) => res.into_iter().map(|v| v.0).collect::>(), Err(e) => { self.errors.push(e.to_compile_error()); @@ -780,7 +786,7 @@ fn generate_runtime_api_id(trait_name: &str) -> TokenStream { let mut res = [0; 8]; res.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], trait_name.as_bytes()).as_bytes()); - quote!( const ID: [u8; 8] = [ #( #res ),* ]; ) + quote!( const ID: [u8; 8] = [ #( #res ),* ]; ) } /// Generates the const variable that holds the runtime api version. 
diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index e16cf3b5c46f78f50fbc03f5bff0f699dbedf503..7def6aa0fb73af3cca94b33ab1bb9ed7d6a8f70e 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -19,7 +19,8 @@ use crate::utils::{ generate_runtime_mod_name_for_trait, generate_method_runtime_api_impl_name, extract_parameter_names_types_and_borrows, generate_native_call_generator_fn_name, return_type_extract_type, generate_call_api_at_fn_name, prefix_function_with_trait, - extract_all_signature_types, + extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, + AllowSelfRefInParameters, RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -66,7 +67,7 @@ fn generate_impl_call( input: &Ident, impl_trait: &Path ) -> Result { - let params = extract_parameter_names_types_and_borrows(signature)?; + let params = extract_parameter_names_types_and_borrows(signature, AllowSelfRefInParameters::No)?; let c = generate_crate_access(HIDDEN_INCLUDES_ID); let c_iter = iter::repeat(&c); @@ -93,50 +94,6 @@ fn generate_impl_call( ) } -/// Extract the trait that is implemented in the given `ItemImpl`. -fn extract_impl_trait<'a>(impl_: &'a ItemImpl) -> Result<&'a Path> { - impl_.trait_.as_ref().map(|v| &v.1).ok_or_else( - || Error::new(impl_.span(), "Only implementation of traits are supported!") - ).and_then(|p| { - if p.segments.len() > 1 { - Ok(p) - } else { - Err( - Error::new( - p.span(), - "The implemented trait has to be referenced with a path, \ - e.g. `impl client::Core for Runtime`." - ) - ) - } - }) -} - -/// Extracts the runtime block identifier. -fn extract_runtime_block_ident(trait_: &Path) -> Result<&TypePath> { - let span = trait_.span(); - let generics = trait_ - .segments - .last() - .ok_or_else(|| Error::new(span, "Empty path not supported"))?; - - match &generics.arguments { - PathArguments::AngleBracketed(ref args) => { - args.args.first().and_then(|v| match v { - GenericArgument::Type(Type::Path(ref block)) => Some(block), - _ => None - }).ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")) - }, - PathArguments::None => { - let span = trait_.segments.last().as_ref().unwrap().span(); - Err(Error::new(span, "Missing `Block` generic parameter.")) - }, - PathArguments::Parenthesized(_) => { - Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")) - } - } -} - /// Generate all the implementation calls for the given functions. 
fn generate_impl_calls( impls: &[ItemImpl], @@ -145,7 +102,7 @@ fn generate_impl_calls( let mut impl_calls = Vec::new(); for impl_ in impls { - let impl_trait_path = extract_impl_trait(impl_)?; + let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; let impl_trait = extend_with_runtime_decl_path(impl_trait_path.clone()); let impl_trait_ident = &impl_trait_path .segments @@ -307,11 +264,19 @@ fn generate_runtime_api_base_structures() -> Result { res } - fn runtime_version_at( + fn has_api( + &self, + at: &#crate_::BlockId, + ) -> std::result::Result where Self: Sized { + self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, |v| v == A::VERSION)) + } + + fn has_api_with bool>( &self, at: &#crate_::BlockId, - ) -> std::result::Result<#crate_::RuntimeVersion, C::Error> { - self.call.runtime_version_at(at) + pred: P, + ) -> std::result::Result where Self: Sized { + self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, pred)) } fn record_proof(&mut self) { @@ -450,7 +415,7 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { // we put the `RuntimeBlock` as first argument for the trait generics. for impl_ in impls.iter() { let mut impl_ = impl_.clone(); - let trait_ = extract_impl_trait(&impl_)?.clone(); + let trait_ = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(); let trait_ = extend_with_runtime_decl_path(trait_); impl_.trait_.as_mut().unwrap().1 = trait_; @@ -506,7 +471,10 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { }).collect::>() }; - let (param_types, error) = match extract_parameter_names_types_and_borrows(&input.sig) { + let (param_types, error) = match extract_parameter_names_types_and_borrows( + &input.sig, + AllowSelfRefInParameters::No, + ) { Ok(res) => ( res.into_iter().map(|v| { let ty = v.1; @@ -645,13 +613,13 @@ fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result let mut result = Vec::with_capacity(impls.len()); for impl_ in impls { - let impl_trait_path = extract_impl_trait(&impl_)?; + let impl_trait_path = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?; let impl_trait = &impl_trait_path .segments .last() .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? 
.clone(); - let runtime_block = extract_runtime_block_ident(impl_trait_path)?; + let runtime_block = extract_block_type_from_trait_path(impl_trait_path)?; let runtime_type = &impl_.self_ty; let mut runtime_mod_path = extend_with_runtime_decl_path(impl_trait_path.clone()); // remove the trait to get just the module path @@ -682,7 +650,9 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { let mut processed_traits = HashSet::new(); for impl_ in impls { - let mut path = extend_with_runtime_decl_path(extract_impl_trait(&impl_)?.clone()); + let mut path = extend_with_runtime_decl_path( + extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(), + ); // Remove the trait let trait_ = path .segments diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs index 9e5908717c18955cf45e3fefb4a95d12ea9d7fb8..12f435bd166dff2a579346ff8a710faec6bf89ba 100644 --- a/primitives/api/proc-macro/src/lib.rs +++ b/primitives/api/proc-macro/src/lib.rs @@ -21,6 +21,7 @@ use proc_macro::TokenStream; mod impl_runtime_apis; +mod mock_impl_runtime_apis; mod decl_runtime_apis; mod utils; @@ -29,6 +30,11 @@ pub fn impl_runtime_apis(input: TokenStream) -> TokenStream { impl_runtime_apis::impl_runtime_apis_impl(input) } +#[proc_macro] +pub fn mock_impl_runtime_apis(input: TokenStream) -> TokenStream { + mock_impl_runtime_apis::mock_impl_runtime_apis_impl(input) +} + #[proc_macro] pub fn decl_runtime_apis(input: TokenStream) -> TokenStream { decl_runtime_apis::decl_runtime_apis_impl(input) diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs new file mode 100644 index 0000000000000000000000000000000000000000..0767c804a637e2983c52affc97c69f4aea47ce62 --- /dev/null +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -0,0 +1,378 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::utils::{ + generate_crate_access, generate_hidden_includes, + generate_method_runtime_api_impl_name, extract_parameter_names_types_and_borrows, + return_type_extract_type, extract_block_type_from_trait_path, extract_impl_trait, + AllowSelfRefInParameters, RequireQualifiedTraitPath, +}; + +use proc_macro2::{Span, TokenStream}; + +use quote::quote; + +use syn::{ + spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, ImplItem, TypePath, parse_quote, + parse::{Parse, ParseStream, Result, Error}, fold::{self, Fold}, +}; + +/// Unique identifier used to make the hidden includes unique for this macro. +const HIDDEN_INCLUDES_ID: &str = "MOCK_IMPL_RUNTIME_APIS"; + +/// The structure used for parsing the runtime api implementations. 
+struct RuntimeApiImpls { + impls: Vec, +} + +impl Parse for RuntimeApiImpls { + fn parse(input: ParseStream) -> Result { + let mut impls = Vec::new(); + + while !input.is_empty() { + impls.push(ItemImpl::parse(input)?); + } + + if impls.is_empty() { + Err(Error::new(Span::call_site(), "No api implementation given!")) + } else { + Ok(Self { impls }) + } + } +} + +/// Implement the `ApiExt` trait, `ApiErrorExt` trait and the `Core` runtime api. +fn implement_common_api_traits( + error_type: Option, + block_type: TypePath, + self_ty: Type, +) -> Result { + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + let error_type = error_type.map(|e| quote!(#e)).unwrap_or_else(|| quote!(String)); + + Ok(quote!( + impl #crate_::ApiErrorExt for #self_ty { + type Error = #error_type; + } + + impl #crate_::ApiExt<#block_type> for #self_ty { + type StateBackend = #crate_::InMemoryBackend<#crate_::HashFor<#block_type>>; + + fn map_api_result std::result::Result, R, E>( + &self, + map_call: F, + ) -> std::result::Result where Self: Sized { + map_call(self) + } + + fn has_api( + &self, + _: &#crate_::BlockId<#block_type>, + ) -> std::result::Result where Self: Sized { + Ok(true) + } + + fn has_api_with bool>( + &self, + at: &#crate_::BlockId<#block_type>, + pred: P, + ) -> std::result::Result where Self: Sized { + Ok(pred(A::VERSION)) + } + + fn record_proof(&mut self) { + unimplemented!("`record_proof` not implemented for runtime api mocks") + } + + fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { + unimplemented!("`extract_proof` not implemented for runtime api mocks") + } + + fn into_storage_changes( + &self, + _: &Self::StateBackend, + _: Option<&#crate_::ChangesTrieState< + #crate_::HashFor<#block_type>, + #crate_::NumberFor<#block_type>, + >>, + _: <#block_type as #crate_::BlockT>::Hash, + ) -> std::result::Result< + #crate_::StorageChanges, + String + > where Self: Sized { + unimplemented!("`into_storage_changes` not implemented for runtime api mocks") + } + } + + impl #crate_::Core<#block_type> for #self_ty { + fn Core_version_runtime_api_impl( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + _: Option<()>, + _: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #error_type> { + unimplemented!("Not required for testing!") + } + + fn Core_execute_block_runtime_api_impl( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + _: Option<#block_type>, + _: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { + unimplemented!("Not required for testing!") + } + + fn Core_initialize_block_runtime_api_impl( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + _: Option<&<#block_type as #crate_::BlockT>::Header>, + _: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { + unimplemented!("Not required for testing!") + } + } + )) +} + +/// Auxiliary structure to fold a runtime api trait implementation into the expected format. +/// +/// This renames the methods, changes the method parameters and extracts the error type. +struct FoldRuntimeApiImpl<'a> { + /// The block type that is being used. + block_type: &'a TypePath, + /// The identifier of the trait being implemented. + impl_trait: &'a Ident, + /// Stores the error type that is being found in the trait implementation as associated type + /// with the name `Error`.
+ error_type: &'a mut Option, +} + +impl<'a> Fold for FoldRuntimeApiImpl<'a> { + fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { + let block = { + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + let (param_names, param_types, error) = match extract_parameter_names_types_and_borrows( + &input.sig, + AllowSelfRefInParameters::YesButIgnore, + ) { + Ok(res) => ( + res.iter().map(|v| v.0.clone()).collect::>(), + res.iter().map(|v| { + let ty = &v.1; + let borrow = &v.2; + quote!( #borrow #ty ) + }).collect::>(), + None + ), + Err(e) => (Vec::new(), Vec::new(), Some(e.to_compile_error())), + }; + + let block_type = &self.block_type; + + // Rewrite the input parameters. + input.sig.inputs = parse_quote! { + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + params: Option<( #( #param_types ),* )>, + _: Vec, + }; + + input.sig.ident = generate_method_runtime_api_impl_name( + &self.impl_trait, + &input.sig.ident, + ); + let ret_type = return_type_extract_type(&input.sig.output); + + // Generate the correct return type. + input.sig.output = parse_quote!( + -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, Self::Error> + ); + + let orig_block = input.block.clone(); + + // Generate the new method implementation that calls into the runtime. + parse_quote!( + { + // Get the error to the user (if we have one). + #error + + let (#( #param_names ),*) = params + .expect("Mocked runtime apis don't support calling deprecated api versions"); + + let __fn_implementation__ = move || #orig_block; + + Ok(#crate_::NativeOrEncoded::Native(__fn_implementation__())) + } + ) + }; + + let mut input = fold::fold_impl_item_method(self, input); + // We need to set the block, after we modified the rest of the ast, otherwise we would + // modify our generated block as well. + input.block = block; + input + } + + fn fold_impl_item(&mut self, input: ImplItem) -> ImplItem { + match input { + ImplItem::Type(ty) => { + if ty.ident == "Error" { + if let Some(error_type) = self.error_type { + if *error_type != ty.ty { + let error = Error::new( + ty.span(), + "Error type can not change between runtime apis", + ); + ImplItem::Verbatim(error.to_compile_error()) + } else { + ImplItem::Verbatim(Default::default()) + } + } else { + *self.error_type = Some(ty.ty); + ImplItem::Verbatim(Default::default()) + } + } else { + let error = Error::new( + ty.span(), + "Only associated type with name `Error` is allowed", + ); + ImplItem::Verbatim(error.to_compile_error()) + } + }, + o => fold::fold_impl_item(self, o), + } + } +} + +/// Result of [`generate_runtime_api_impls`]. +struct GeneratedRuntimeApiImpls { + /// All the runtime api implementations. + impls: TokenStream, + /// The error type that should be used by the runtime apis. + error_type: Option, + /// The block type that is being used by the runtime apis. + block_type: TypePath, + /// The type the traits are implemented for. + self_ty: Type, +} + +/// Generate the runtime api implementations from the given trait implementations. +/// +/// This folds the method names, changes the method parameters, method return type, +/// extracts the error type, self type and the block type. 
+fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result { + let mut result = Vec::with_capacity(impls.len()); + let mut error_type = None; + let mut global_block_type: Option = None; + let mut self_ty: Option> = None; + + for impl_ in impls { + let impl_trait_path = extract_impl_trait(&impl_, RequireQualifiedTraitPath::No)?; + let impl_trait = &impl_trait_path + .segments + .last() + .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? + .clone(); + let block_type = extract_block_type_from_trait_path(impl_trait_path)?; + + self_ty = match self_ty.take() { + Some(self_ty) => { + if self_ty == impl_.self_ty { + Some(self_ty) + } else { + let mut error = Error::new( + impl_.self_ty.span(), + "Self type should not change between runtime apis", + ); + + error.combine(Error::new( + self_ty.span(), + "First self type found here", + )); + + return Err(error) + } + }, + None => Some(impl_.self_ty.clone()), + }; + + global_block_type = match global_block_type.take() { + Some(global_block_type) => { + if global_block_type == *block_type { + Some(global_block_type) + } else { + let mut error = Error::new( + block_type.span(), + "Block type should be the same between all runtime apis.", + ); + + error.combine(Error::new( + global_block_type.span(), + "First block type found here", + )); + + return Err(error) + } + }, + None => Some(block_type.clone()), + }; + + let mut visitor = FoldRuntimeApiImpl { + block_type, + impl_trait: &impl_trait.ident, + error_type: &mut error_type, + }; + + result.push(visitor.fold_item_impl(impl_.clone())); + } + + Ok(GeneratedRuntimeApiImpls { + impls: quote!( #( #result )* ), + error_type, + block_type: global_block_type.expect("There is at least one runtime api; qed"), + self_ty: *self_ty.expect("There is at least one runtime api; qed"), + }) +} + +/// The implementation of the `mock_impl_runtime_apis!` macro. +pub fn mock_impl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { + // Parse all impl blocks + let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); + + mock_impl_runtime_apis_impl_inner(&api_impls).unwrap_or_else(|e| e.to_compile_error()).into() +} + +fn mock_impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { + let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); + let GeneratedRuntimeApiImpls { impls, error_type, block_type, self_ty } = + generate_runtime_api_impls(api_impls)?; + let api_traits = implement_common_api_traits(error_type, block_type, self_ty)?; + + Ok(quote!( + #hidden_includes + + #impls + + #api_traits + )) +} diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 8330624bf26d752679ce22afff35a989168a6379..1a79cf6c1ef5b5f046ac98229b3f4203f46ce45b 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -18,7 +18,7 @@ use proc_macro2::{TokenStream, Span}; use syn::{ Result, Ident, Signature, parse_quote, Type, Pat, spanned::Spanned, FnArg, Error, token::And, - ImplItem, ReturnType, + ImplItem, ReturnType, PathArguments, Path, GenericArgument, TypePath, ItemImpl, }; use quote::quote; @@ -126,13 +126,21 @@ pub fn generate_unique_pattern(pat: Pat, counter: &mut u32) -> Pat { }, _ => pat, } - } +} + +/// Allow `&self` in parameters of a method. +pub enum AllowSelfRefInParameters { + /// Allows `&self` in parameters, but doesn't return it as part of the parameters.
+ YesButIgnore, + No, +} /// Extracts the name, the type and `&` or ``(if it is a reference or not) /// for each parameter in the given function signature. -pub fn extract_parameter_names_types_and_borrows(sig: &Signature) - -> Result)>> -{ +pub fn extract_parameter_names_types_and_borrows( + sig: &Signature, + allow_self: AllowSelfRefInParameters, +) -> Result)>> { let mut result = Vec::new(); let mut generated_pattern_counter = 0; for input in sig.inputs.iter() { @@ -145,13 +153,20 @@ pub fn extract_parameter_names_types_and_borrows(sig: &Signature) t => { (t.clone(), None) }, }; - let name = - generate_unique_pattern((*arg.pat).clone(), &mut generated_pattern_counter); + let name = generate_unique_pattern( + (*arg.pat).clone(), + &mut generated_pattern_counter, + ); result.push((name, ty, borrow)); }, - FnArg::Receiver(_) => { + FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => { return Err(Error::new(input.span(), "`self` parameter not supported!")) - } + }, + FnArg::Receiver(recv) => { + if recv.mutability.is_some() || recv.reference.is_none() { + return Err(Error::new(recv.span(), "Only `&self` is supported!")) + } + }, } } @@ -199,3 +214,60 @@ pub fn extract_all_signature_types(items: &[ImplItem]) -> Vec { .flatten() .collect() } + +/// Extracts the block type from a trait path. +/// +/// It is expected that the block type is the first type in the generic arguments. +pub fn extract_block_type_from_trait_path(trait_: &Path) -> Result<&TypePath> { + let span = trait_.span(); + let generics = trait_ + .segments + .last() + .ok_or_else(|| Error::new(span, "Empty path not supported"))?; + + match &generics.arguments { + PathArguments::AngleBracketed(ref args) => { + args.args.first().and_then(|v| match v { + GenericArgument::Type(Type::Path(ref block)) => Some(block), + _ => None + }).ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")) + }, + PathArguments::None => { + let span = trait_.segments.last().as_ref().unwrap().span(); + Err(Error::new(span, "Missing `Block` generic parameter.")) + }, + PathArguments::Parenthesized(_) => { + Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")) + }, + } +} + +/// Should a qualified trait path be required? +/// +/// e.g. `path::Trait` is qualified and `Trait` is not. +pub enum RequireQualifiedTraitPath { + Yes, + No, +} + +/// Extract the trait that is implemented by the given `ItemImpl`. +pub fn extract_impl_trait<'a>( + impl_: &'a ItemImpl, + require: RequireQualifiedTraitPath, +) -> Result<&'a Path> { + impl_.trait_.as_ref().map(|v| &v.1).ok_or_else( + || Error::new(impl_.span(), "Only implementation of traits are supported!") + ).and_then(|p| { + if p.segments.len() > 1 || matches!(require, RequireQualifiedTraitPath::No) { + Ok(p) + } else { + Err( + Error::new( + p.span(), + "The implemented trait has to be referenced with a path, \ + e.g. `impl client::Core for Runtime`." 
+ ) + ) + } + }) +} diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index 0901be5831dfd9758bdc59b466302963ad8f0fb8..74bcf19a9949e43316001d34b7ccfe3bda2b73cb 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -36,7 +36,7 @@ extern crate self as sp_api; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_state_machine::{ - OverlayedChanges, StorageProof, Backend as StateBackend, ChangesTrieState, + OverlayedChanges, StorageProof, Backend as StateBackend, ChangesTrieState, InMemoryBackend, }; #[doc(hidden)] #[cfg(feature = "std")] @@ -78,7 +78,8 @@ use std::{panic::UnwindSafe, cell::RefCell}; /// declaration. Besides one exception, the macro adds an extra generic parameter `Block: BlockT` /// to the client side and the runtime side. This generic parameter is usable by the user. /// -/// For implementing these macros you should use the `impl_runtime_apis!` macro. +/// For implementing these macros you should use the +/// [`impl_runtime_apis!`](macro.impl_runtime_apis.html) macro. /// /// # Example /// @@ -143,8 +144,9 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// Tags given trait implementations as runtime apis. /// -/// All traits given to this macro, need to be declared with the `decl_runtime_apis!` macro. -/// The implementation of the trait should follow the declaration given to the `decl_runtime_apis!` +/// All traits given to this macro, need to be declared with the +/// [`decl_runtime_apis!`](macro.decl_runtime_apis.html) macro. The implementation of the trait +/// should follow the declaration given to the [`decl_runtime_apis!`](macro.decl_runtime_apis.html) /// macro, besides the `Block` type that is required as first generic parameter for each runtime /// api trait. When implementing a runtime api trait, it is required that the trait is referenced /// by a path, e.g. `impl my_trait::MyTrait for Runtime`. The macro will use this path to access @@ -182,7 +184,7 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// # } /// # pub trait BlockBuilder { /// # fn build_block() -> Block; -/// # } +/// # } /// # } /// /// /// All runtime api implementations need to be done in one call of the macro! @@ -226,6 +228,70 @@ pub use sp_api_proc_macro::decl_runtime_apis; /// ``` pub use sp_api_proc_macro::impl_runtime_apis; +/// Mocks given trait implementations as runtime apis. +/// +/// Accepts similar syntax as [`impl_runtime_apis!`](macro.impl_runtime_apis.html) and generates +/// simplified mock implementations of the given runtime apis. The difference in syntax is that the +/// trait does not need to be referenced by a qualified path, methods accept the `&self` parameter +/// and the error type can be specified as associated type. If no error type is specified `String` +/// is used as error type. +/// +/// Besides implementing the given traits, the [`Core`], [`ApiExt`] and [`ApiErrorExt`] are +/// implemented automatically. +/// +/// # Example +/// +/// ```rust +/// use sp_version::create_runtime_str; +/// # +/// # use sp_runtime::traits::Block as BlockT; +/// # use sp_test_primitives::Block; +/// # +/// # sp_api::decl_runtime_apis! { +/// # /// Declare the api trait. +/// # pub trait Balance { +/// # /// Get the balance. +/// # fn get_balance() -> u64; +/// # /// Set the balance. +/// # fn set_balance(val: u64); +/// # } +/// # pub trait BlockBuilder { +/// # fn build_block() -> Block; +/// # } +/// # } +/// +/// struct MockApi { +/// balance: u64, +/// } +/// +/// /// All runtime api mock implementations need to be done in one call of the macro! 
+/// sp_api::mock_impl_runtime_apis! { +/// impl Balance for MockApi { +/// /// Here we take the `&self` to access the instance. +/// fn get_balance(&self) -> u64 { +/// self.balance +/// } +/// fn set_balance(_bal: u64) { +/// // Store the balance +/// } +/// } +/// +/// impl BlockBuilder for MockApi { +/// /// Sets the error type that is being used by the mock implementation. +/// /// The error type is used by all runtime apis. It is only required to +/// /// be specified in one trait implementation. +/// type Error = String; +/// +/// fn build_block() -> Block { +/// unimplemented!("Not Required in tests") +/// } +/// } +/// } +/// +/// # fn main() {} +/// ``` +pub use sp_api_proc_macro::mock_impl_runtime_apis; + /// A type that records all accessed trie nodes and generates a proof out of it. #[cfg(feature = "std")] pub type ProofRecorder = sp_state_machine::ProofRecorder>; @@ -293,21 +359,14 @@ pub trait ApiExt: ApiErrorExt { fn has_api( &self, at: &BlockId, - ) -> Result where Self: Sized { - self.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, |v| v == A::VERSION)) - } + ) -> Result where Self: Sized; /// Check if the given api is implemented and the version passes a predicate. fn has_api_with bool>( &self, at: &BlockId, pred: P, - ) -> Result where Self: Sized { - self.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, pred)) - } - - /// Returns the runtime version at the given block id. - fn runtime_version_at(&self, at: &BlockId) -> Result; + ) -> Result where Self: Sized; /// Start recording all accessed trie nodes for generating proofs. fn record_proof(&mut self); diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index 3b41e28cf3b2b70e489835c605880a019e193c25..f2e66afc24abbac9f4c5604701d6b6f558bf3b4d 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -9,22 +9,22 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-api = { version = "2.0.0-alpha.2", path = "../" } +sp-api = { version = "2.0.0-alpha.5", path = "../" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } -sp-version = { version = "2.0.0-alpha.2", path = "../../version" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../blockchain" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } -sc-block-builder = { version = "0.8.0-alpha.2", path = "../../../client/block-builder" } -codec = { package = "parity-scale-codec", version = "1.2.0" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } +sp-version = { version = "2.0.0-alpha.5", path = "../../version" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../runtime" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../blockchain" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../../../client/block-builder" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../../primitives/state-machine" } trybuild = "1.0.17" rustversion = "1.0.0" [dev-dependencies] criterion = "0.3.0" substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } -sp-core = { version = "2.0.0-alpha.1", path = "../../core" } +sp-core = { 
version = "2.0.0-alpha.5", path = "../../core" } [[bench]] name = "bench" @@ -34,3 +34,6 @@ harness = false [features] default = [ "std" ] std = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index d5a668dec1245a34ad054bfd7813298863aa07ca..a09bd0412c8afe9f15deb078a3f338dd0937ac59 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -14,7 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sp_api::{RuntimeApiInfo, decl_runtime_apis, impl_runtime_apis}; +use sp_api::{ + RuntimeApiInfo, decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, + ApiExt, +}; use sp_runtime::{traits::{GetNodeBlockType, Block as BlockT}, generic::BlockId}; @@ -81,6 +84,34 @@ impl_runtime_apis! { } } +struct MockApi { + block: Option, +} + +mock_impl_runtime_apis! { + impl Api for MockApi { + fn test(_: u64) { + unimplemented!() + } + + fn something_with_block(&self, _: Block) -> Block { + self.block.clone().unwrap() + } + + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } + + fn same_name() {} + + fn wild_card(_: u32) {} + } + + impl ApiWithCustomVersion for MockApi { + fn same_name() {} + } +} + type TestClient = substrate_test_runtime_client::sc_client::Client< substrate_test_runtime_client::Backend, substrate_test_runtime_client::Executor, @@ -129,3 +160,22 @@ fn check_runtime_api_versions() { check_runtime_api_versions_contains::>(); check_runtime_api_versions_contains::>(); } + +#[test] +fn mock_runtime_api_has_api() { + let mock = MockApi { block: None }; + + assert!( + mock.has_api::>(&BlockId::Number(0)).unwrap(), + ); + assert!(mock.has_api::>(&BlockId::Number(0)).unwrap()); +} + +#[test] +#[should_panic(expected = "Mocked runtime apis don't support calling deprecated api versions")] +fn mock_runtime_api_panics_on_calling_old_version() { + let mock = MockApi { block: None }; + + #[allow(deprecated)] + let _ = mock.same_name_before_version_2(&BlockId::Number(0)); +} diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index eb128a512c9c4eb2d407a3e51baad5e5f8f9eb14..a907ac80957200d8135ee50e3782420c134a3411 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -200,6 +200,7 @@ fn record_proof_works() { &backend, &mut overlay, &executor, + sp_core::tasks::executor(), "Core_execute_block", &block.encode(), &runtime_code, diff --git a/primitives/api/test/tests/ui/mock_only_error_associated_type.rs b/primitives/api/test/tests/ui/mock_only_error_associated_type.rs new file mode 100644 index 0000000000000000000000000000000000000000..bbd3c71c940177aba081954af7291c4f427bf604 --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_error_associated_type.rs @@ -0,0 +1,19 @@ +use substrate_test_runtime_client::runtime::Block; + +sp_api::decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } +} + +struct MockApi; + +sp_api::mock_impl_runtime_apis! 
{ + impl Api for MockApi { + type OtherData = u32; + + fn test(data: u64) {} + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/mock_only_error_associated_type.stderr b/primitives/api/test/tests/ui/mock_only_error_associated_type.stderr new file mode 100644 index 0000000000000000000000000000000000000000..beced70413bb07cb873789ba48181d0d59d6ac32 --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_error_associated_type.stderr @@ -0,0 +1,5 @@ +error: Only associated type with name `Error` is allowed + --> $DIR/mock_only_error_associated_type.rs:13:3 + | +13 | type OtherData = u32; + | ^^^^ diff --git a/primitives/api/test/tests/ui/mock_only_one_block_type.rs b/primitives/api/test/tests/ui/mock_only_one_block_type.rs new file mode 100644 index 0000000000000000000000000000000000000000..969b21d7378d75b6b2bb28c1ca118aa041015784 --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_one_block_type.rs @@ -0,0 +1,27 @@ +use substrate_test_runtime_client::runtime::Block; + +struct Block2; + +sp_api::decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } + + pub trait Api2 { + fn test(data: u64); + } +} + +struct MockApi; + +sp_api::mock_impl_runtime_apis! { + impl Api for MockApi { + fn test(data: u64) {} + } + + impl Api2 for MockApi { + fn test(data: u64) {} + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/mock_only_one_block_type.stderr b/primitives/api/test/tests/ui/mock_only_one_block_type.stderr new file mode 100644 index 0000000000000000000000000000000000000000..1abc8db726a160e151cb47abf6b745d5630b1851 --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_one_block_type.stderr @@ -0,0 +1,19 @@ +error: Block type should be the same between all runtime apis. + --> $DIR/mock_only_one_block_type.rs:22:12 + | +22 | impl Api2 for MockApi { + | ^^^^^^ + +error: First block type found here + --> $DIR/mock_only_one_block_type.rs:18:11 + | +18 | impl Api for MockApi { + | ^^^^^ + +warning: unused import: `substrate_test_runtime_client::runtime::Block` + --> $DIR/mock_only_one_block_type.rs:1:5 + | +1 | use substrate_test_runtime_client::runtime::Block; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(unused_imports)]` on by default diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.rs b/primitives/api/test/tests/ui/mock_only_one_error_type.rs new file mode 100644 index 0000000000000000000000000000000000000000..1c3f13dbb9bf1107e5c88fa50bd6ef12eb485b4c --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_one_error_type.rs @@ -0,0 +1,29 @@ +use substrate_test_runtime_client::runtime::Block; + +sp_api::decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } + + pub trait Api2 { + fn test(data: u64); + } +} + +struct MockApi; + +sp_api::mock_impl_runtime_apis! 
{ + impl Api for MockApi { + type Error = u32; + + fn test(data: u64) {} + } + + impl Api2 for MockApi { + type Error = u64; + + fn test(data: u64) {} + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/mock_only_one_error_type.stderr b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr new file mode 100644 index 0000000000000000000000000000000000000000..8f026838c96b8db0e503179c745b3a76f4a2c181 --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_one_error_type.stderr @@ -0,0 +1,27 @@ +error: Error type can not change between runtime apis + --> $DIR/mock_only_one_error_type.rs:23:3 + | +23 | type Error = u64; + | ^^^^ + +error[E0277]: the trait bound `u32: std::convert::From` is not satisfied + --> $DIR/mock_only_one_error_type.rs:15:1 + | +15 | / sp_api::mock_impl_runtime_apis! { +16 | | impl Api for MockApi { +17 | | type Error = u32; +18 | | +... | +26 | | } +27 | | } + | | ^ + | | | + | |_the trait `std::convert::From` is not implemented for `u32` + | in this macro invocation + | + = help: the following implementations were found: + > + > + > + > + and 16 others diff --git a/primitives/api/test/tests/ui/mock_only_one_self_type.rs b/primitives/api/test/tests/ui/mock_only_one_self_type.rs new file mode 100644 index 0000000000000000000000000000000000000000..4b29ec2a6ab07e7a08689549423e03687501543d --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_one_self_type.rs @@ -0,0 +1,26 @@ +use substrate_test_runtime_client::runtime::Block; + +sp_api::decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + } + + pub trait Api2 { + fn test(data: u64); + } +} + +struct MockApi; +struct MockApi2; + +sp_api::mock_impl_runtime_apis! { + impl Api for MockApi { + fn test(data: u64) {} + } + + impl Api2 for MockApi2 { + fn test(data: u64) {} + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/mock_only_one_self_type.stderr b/primitives/api/test/tests/ui/mock_only_one_self_type.stderr new file mode 100644 index 0000000000000000000000000000000000000000..996d1d44c044c151bc5e57e93e83d3749e86a80c --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_one_self_type.stderr @@ -0,0 +1,19 @@ +error: Self type should not change between runtime apis + --> $DIR/mock_only_one_self_type.rs:21:23 + | +21 | impl Api2 for MockApi2 { + | ^^^^^^^^ + +error: First self type found here + --> $DIR/mock_only_one_self_type.rs:17:22 + | +17 | impl Api for MockApi { + | ^^^^^^^ + +warning: unused import: `substrate_test_runtime_client::runtime::Block` + --> $DIR/mock_only_one_self_type.rs:1:5 + | +1 | use substrate_test_runtime_client::runtime::Block; + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | + = note: `#[warn(unused_imports)]` on by default diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.rs b/primitives/api/test/tests/ui/mock_only_self_reference.rs new file mode 100644 index 0000000000000000000000000000000000000000..8a733f5779ce938edcd1301b535938288b66737b --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_self_reference.rs @@ -0,0 +1,20 @@ +use substrate_test_runtime_client::runtime::Block; + +sp_api::decl_runtime_apis! { + pub trait Api { + fn test(data: u64); + fn test2(data: u64); + } +} + +struct MockApi; + +sp_api::mock_impl_runtime_apis! 
{ + impl Api for MockApi { + fn test(self, data: u64) {} + + fn test2(&mut self, data: u64) {} + } +} + +fn main() {} diff --git a/primitives/api/test/tests/ui/mock_only_self_reference.stderr b/primitives/api/test/tests/ui/mock_only_self_reference.stderr new file mode 100644 index 0000000000000000000000000000000000000000..9c1658b0a6cea1260b082eee7afea6d2521c66ac --- /dev/null +++ b/primitives/api/test/tests/ui/mock_only_self_reference.stderr @@ -0,0 +1,65 @@ +error: Only `&self` is supported! + --> $DIR/mock_only_self_reference.rs:14:11 + | +14 | fn test(self, data: u64) {} + | ^^^^ + +error: Only `&self` is supported! + --> $DIR/mock_only_self_reference.rs:16:12 + | +16 | fn test2(&mut self, data: u64) {} + | ^ + +error[E0053]: method `Api_test_runtime_api_impl` has an incompatible type for trait + --> $DIR/mock_only_self_reference.rs:12:1 + | +3 | / sp_api::decl_runtime_apis! { +4 | | pub trait Api { +5 | | fn test(data: u64); +6 | | fn test2(data: u64); +7 | | } +8 | | } + | |_- type in trait +... +12 | sp_api::mock_impl_runtime_apis! { + | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `()` + | | +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_- in this macro invocation + | + = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` + found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` + +error[E0053]: method `Api_test2_runtime_api_impl` has an incompatible type for trait + --> $DIR/mock_only_self_reference.rs:12:1 + | +3 | / sp_api::decl_runtime_apis! { +4 | | pub trait Api { +5 | | fn test(data: u64); +6 | | fn test2(data: u64); +7 | | } +8 | | } + | |_- type in trait +... +12 | sp_api::mock_impl_runtime_apis! { + | -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | | + | _expected `u64`, found `()` + | | +13 | | impl Api for MockApi { +14 | | fn test(self, data: u64) {} +15 | | +16 | | fn test2(&mut self, data: u64) {} +17 | | } +18 | | } + | |_- in this macro invocation + | + = note: expected fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option, std::vec::Vec<_>) -> std::result::Result<_, _>` + found fn pointer `fn(&MockApi, &sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::BlockId, substrate_test_runtime::Extrinsic>>, sp_api_hidden_includes_DECL_RUNTIME_APIS::sp_api::ExecutionContext, std::option::Option<()>, std::vec::Vec<_>) -> std::result::Result<_, _>` diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index ee710b29885a3fbe80202eb06e5799d875614cda..7c3fb5357997ffa2e7470c5aaf94cca168594fc8 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-application-crypto" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" description = "Provides facilities for generating application specific crypto wrapper types." 
@@ -11,11 +11,11 @@ documentation = "https://docs.rs/sp-application-crypto" [dependencies] -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../core" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } [features] default = [ "std" ] @@ -31,3 +31,6 @@ full_crypto = [ "sp-io/disable_panic_handler", "sp-io/disable_oom", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/application-crypto/src/ed25519.rs b/primitives/application-crypto/src/ed25519.rs index 414715a10684c18b49a33fe6bb1fc08c20639d4a..5be79ff4f7985a54e9eb0c34fb3552f3cf062bb1 100644 --- a/primitives/application-crypto/src/ed25519.rs +++ b/primitives/application-crypto/src/ed25519.rs @@ -23,12 +23,27 @@ use sp_std::vec::Vec; pub use sp_core::ed25519::*; mod app { + use sp_core::crypto::{CryptoTypePublicPair, Public as TraitPublic}; use sp_core::testing::ED25519; + use sp_core::ed25519::CRYPTO_ID; + crate::app_crypto!(super, ED25519); impl crate::traits::BoundToRuntimeAppPublic for Public { type Public = Self; } + + impl From for CryptoTypePublicPair { + fn from(key: Public) -> Self { + (&key).into() + } + } + + impl From<&Public> for CryptoTypePublicPair { + fn from(key: &Public) -> Self { + CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) + } + } } pub use app::{Public as AppPublic, Signature as AppSignature}; diff --git a/primitives/application-crypto/src/lib.rs b/primitives/application-crypto/src/lib.rs index b7c9ccaa9821aa49b9f14e70f841e05e1ef9853f..79572eb49d1e60d38a06b13356481c25a23803cf 100644 --- a/primitives/application-crypto/src/lib.rs +++ b/primitives/application-crypto/src/lib.rs @@ -21,11 +21,11 @@ #![cfg_attr(not(feature = "std"), no_std)] #[doc(hidden)] -pub use sp_core::{self, crypto::{CryptoType, Public, Derive, IsWrappedBy, Wraps}, RuntimeDebug}; +pub use sp_core::{self, crypto::{CryptoType, CryptoTypePublicPair, Public, Derive, IsWrappedBy, Wraps}, RuntimeDebug}; #[doc(hidden)] #[cfg(feature = "full_crypto")] pub use sp_core::crypto::{SecretStringError, DeriveJunction, Ss58Codec, Pair}; -pub use sp_core::{crypto::{KeyTypeId, key_types}}; +pub use sp_core::crypto::{CryptoTypeId, KeyTypeId, key_types}; #[doc(hidden)] pub use codec; @@ -103,17 +103,8 @@ macro_rules! 
app_crypto_pair { type Signature = Signature; type DeriveError = <$pair as $crate::Pair>::DeriveError; - #[cfg(feature = "std")] - fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed) { - let r = <$pair>::generate_with_phrase(password); - (Self(r.0), r.1, r.2) - } - #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) - -> Result<(Self, Self::Seed), $crate::SecretStringError> - { - <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) - } + $crate::app_crypto_pair_functions_if_std!($pair); + fn derive< Iter: Iterator >(&self, path: Iter, seed: Option) -> Result<(Self, Option), Self::DeriveError> { @@ -158,10 +149,38 @@ macro_rules! app_crypto_pair { }; } +/// Implements functions for the `Pair` trait when `feature = "std"` is enabled. +#[doc(hidden)] +#[cfg(feature = "std")] +#[macro_export] +macro_rules! app_crypto_pair_functions_if_std { + ($pair:ty) => { + fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed) { + let r = <$pair>::generate_with_phrase(password); + (Self(r.0), r.1, r.2) + } + + fn from_phrase(phrase: &str, password: Option<&str>) + -> Result<(Self, Self::Seed), $crate::SecretStringError> + { + <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) + } + } +} + +#[doc(hidden)] +#[cfg(not(feature = "std"))] +#[macro_export] +macro_rules! app_crypto_pair_functions_if_std { + ($pair:ty) => {} +} + + /// Declares Public type which is functionally equivalent to `$public`, but is new /// Application-specific type whose identifier is `$key_type`. /// can only be used together with `full_crypto` feature /// For full functionality, app_crypto_public_common! must be called too. +#[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_full_crypto { ($public:ty, $key_type:expr) => { @@ -195,6 +214,7 @@ macro_rules! app_crypto_public_full_crypto { /// Application-specific type whose identifier is `$key_type`. /// can only be used without `full_crypto` feature /// For full functionality, app_crypto_public_common! must be called too. +#[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_not_full_crypto { ($public:ty, $key_type:expr) => { @@ -223,44 +243,11 @@ macro_rules! app_crypto_public_not_full_crypto { /// Declares Public type which is functionally equivalent to `$public`, but is new /// Application-specific type whose identifier is `$key_type`. /// For full functionality, app_crypto_public_(not)_full_crypto! must be called too. +#[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_common { ($public:ty, $sig:ty, $key_type:expr) => { - impl $crate::Derive for Public { - #[cfg(feature = "std")] - fn derive>(&self, - path: Iter - ) -> Option { - self.0.derive(path).map(Self) - } - } - - #[cfg(feature = "std")] - impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - use $crate::Ss58Codec; - write!(f, "{}", self.0.to_ss58check()) - } - } - #[cfg(feature = "std")] - impl $crate::serde::Serialize for Public { - fn serialize(&self, serializer: S) -> std::result::Result where - S: $crate::serde::Serializer - { - use $crate::Ss58Codec; - serializer.serialize_str(&self.to_ss58check()) - } - } - #[cfg(feature = "std")] - impl<'de> $crate::serde::Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> std::result::Result where - D: $crate::serde::Deserializer<'de> - { - use $crate::Ss58Codec; - Public::from_ss58check(&String::deserialize(deserializer)?) 
- .map_err(|e| $crate::serde::de::Error::custom(format!("{:?}", e))) - } - } + $crate::app_crypto_public_common_if_std!(); impl AsRef<[u8]> for Public { fn as_ref(&self) -> &[u8] { self.0.as_ref() } @@ -309,10 +296,63 @@ macro_rules! app_crypto_public_common { } } +/// Implements traits for the public key type if `feature = "std"` is enabled. +#[cfg(feature = "std")] +#[doc(hidden)] +#[macro_export] +macro_rules! app_crypto_public_common_if_std { + () => { + impl $crate::Derive for Public { + fn derive>(&self, + path: Iter + ) -> Option { + self.0.derive(path).map(Self) + } + } + + impl std::fmt::Display for Public { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + use $crate::Ss58Codec; + write!(f, "{}", self.0.to_ss58check()) + } + } + + impl $crate::serde::Serialize for Public { + fn serialize(&self, serializer: S) -> std::result::Result where + S: $crate::serde::Serializer + { + use $crate::Ss58Codec; + serializer.serialize_str(&self.to_ss58check()) + } + } + + impl<'de> $crate::serde::Deserialize<'de> for Public { + fn deserialize(deserializer: D) -> std::result::Result where + D: $crate::serde::Deserializer<'de> + { + use $crate::Ss58Codec; + Public::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| $crate::serde::de::Error::custom(format!("{:?}", e))) + } + } + } +} + +#[cfg(not(feature = "std"))] +#[doc(hidden)] +#[macro_export] +macro_rules! app_crypto_public_common_if_std { + () => { + impl $crate::Derive for Public {} + } +} + + /// Declares Signature type which is functionally equivalent to `$sig`, but is new /// Application-specific type whose identifier is `$key_type`. /// can only be used together with `full_crypto` feature /// For full functionality, app_crypto_public_common! must be called too. +#[doc(hidden)] #[macro_export] macro_rules! app_crypto_signature_full_crypto { ($sig:ty, $key_type:expr) => { @@ -345,6 +385,7 @@ macro_rules! app_crypto_signature_full_crypto { /// Application-specific type whose identifier is `$key_type`. /// can only be used without `full_crypto` feature /// For full functionality, app_crypto_public_common! must be called too. +#[doc(hidden)] #[macro_export] macro_rules! app_crypto_signature_not_full_crypto { ($sig:ty, $key_type:expr) => { @@ -372,6 +413,7 @@ macro_rules! app_crypto_signature_not_full_crypto { /// Declares Signature type which is functionally equivalent to `$sig`, but is new /// Application-specific type whose identifier is `$key_type`. /// For full functionality, app_crypto_public_(not)_full_crypto! must be called too. +#[doc(hidden)] #[macro_export] macro_rules! 
app_crypto_signature_common { ($sig:ty, $key_type:expr) => { diff --git a/primitives/application-crypto/src/sr25519.rs b/primitives/application-crypto/src/sr25519.rs index 59c6f19b6f2668939054127347f5a87ed6989907..a0f2cef1c4e45b6150be335ac271e3dd592febbe 100644 --- a/primitives/application-crypto/src/sr25519.rs +++ b/primitives/application-crypto/src/sr25519.rs @@ -23,12 +23,27 @@ use sp_std::vec::Vec; pub use sp_core::sr25519::*; mod app { + use sp_core::crypto::{CryptoTypePublicPair, Public as TraitPublic}; use sp_core::testing::SR25519; + use sp_core::sr25519::CRYPTO_ID; + crate::app_crypto!(super, SR25519); impl crate::traits::BoundToRuntimeAppPublic for Public { type Public = Self; } + + impl From for CryptoTypePublicPair { + fn from(key: Public) -> Self { + (&key).into() + } + } + + impl From<&Public> for CryptoTypePublicPair { + fn from(key: &Public) -> Self { + CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) + } + } } pub use app::{Public as AppPublic, Signature as AppSignature}; diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index 0ed5355ce2d1e527303aa2badb0f9a1f1c544200..d34840b4eb5206429fbf829f60c2e11187b838f4 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -10,8 +10,11 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../core" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../core" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime" } -sp-api = { version = "2.0.0-alpha.2", path = "../../api" } -sp-application-crypto = { version = "2.0.0-alpha.2", path = "../" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../runtime" } +sp-api = { version = "2.0.0-alpha.5", path = "../../api" } +sp-application-crypto = { version = "2.0.0-alpha.5", path = "../" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/application-crypto/test/src/ed25519.rs b/primitives/application-crypto/test/src/ed25519.rs index 400edfd6ae4402f9ab77fc0fe4fde9f61dfd89ce..1d72962829a53309ed4146a5f8657820ba15b697 100644 --- a/primitives/application-crypto/test/src/ed25519.rs +++ b/primitives/application-crypto/test/src/ed25519.rs @@ -17,7 +17,10 @@ //! 
Integration tests for ed25519 use sp_runtime::generic::BlockId; -use sp_core::{testing::{KeyStore, ED25519}, crypto::Pair}; +use sp_core::{ + crypto::Pair, + testing::{KeyStore, ED25519}, +}; use substrate_test_runtime_client::{ TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, runtime::TestAPI, @@ -33,8 +36,7 @@ fn ed25519_works_in_runtime() { .test_ed25519_crypto(&BlockId::Number(0)) .expect("Tests `ed25519` crypto."); - let key_pair = keystore.read().ed25519_key_pair(ED25519, &public.as_ref()) - .expect("There should be at a `ed25519` key in the keystore for the given public key."); - - assert!(AppPair::verify(&signature, "ed25519", &AppPublic::from(key_pair.public()))); + let supported_keys = keystore.read().keys(ED25519).unwrap(); + assert!(supported_keys.contains(&public.clone().into())); + assert!(AppPair::verify(&signature, "ed25519", &AppPublic::from(public))); } diff --git a/primitives/application-crypto/test/src/sr25519.rs b/primitives/application-crypto/test/src/sr25519.rs index 49bb3c2a8364027c1dd4eceef607deb49ca09469..f2c7c48b2bc91c0329ecaad7541be5f13f3f754c 100644 --- a/primitives/application-crypto/test/src/sr25519.rs +++ b/primitives/application-crypto/test/src/sr25519.rs @@ -18,7 +18,10 @@ use sp_runtime::generic::BlockId; -use sp_core::{testing::{KeyStore, SR25519}, crypto::Pair}; +use sp_core::{ + crypto::Pair, + testing::{KeyStore, SR25519}, +}; use substrate_test_runtime_client::{ TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, runtime::TestAPI, @@ -34,8 +37,7 @@ fn sr25519_works_in_runtime() { .test_sr25519_crypto(&BlockId::Number(0)) .expect("Tests `sr25519` crypto."); - let key_pair = keystore.read().sr25519_key_pair(SR25519, public.as_ref()) - .expect("There should be at a `sr25519` key in the keystore for the given public key."); - - assert!(AppPair::verify(&signature, "sr25519", &AppPublic::from(key_pair.public()))); + let supported_keys = keystore.read().keys(SR25519).unwrap(); + assert!(supported_keys.contains(&public.clone().into())); + assert!(AppPair::verify(&signature, "sr25519", &AppPublic::from(public))); } diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index 617e8b8e5ceb86866e51eed4bfa036bcae7d2c61..208525f6c193bdca2b9b5f9ba4a566999d523544 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-arithmetic" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,15 +11,15 @@ documentation = "https://docs.rs/sp-arithmetic" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-debug-derive = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/debug-derive" } +sp-debug-derive = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/debug-derive" } [dev-dependencies] -primitive-types = "0.6.2" +primitive-types = "0.7.0" rand = "0.7.2" criterion = "0.3" @@ -36,3 +36,6 @@ std = [ [[bench]] 
name = "bench" harness = false + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/arithmetic/fuzzer/Cargo.lock b/primitives/arithmetic/fuzzer/Cargo.lock index b196418375c59ee17bb8ba03680011d5b0efd1da..3a4187437ae7312ec7016cb9e41638a018cfcbbf 100644 --- a/primitives/arithmetic/fuzzer/Cargo.lock +++ b/primitives/arithmetic/fuzzer/Cargo.lock @@ -32,9 +32,9 @@ checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" [[package]] name = "byteorder" -version = "1.3.2" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7c3dd8985a7111efc5c80b44e23ecdd8c007de8ade3b96595387e812b957cf5" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" [[package]] name = "c2-chacha" @@ -83,8 +83,6 @@ dependencies = [ [[package]] name = "honggfuzz" version = "0.5.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24c27b4aa3049d6d10d8e33d52c9d03ca9aec18f8a449b246f8c4a5b0c10fb34" dependencies = [ "arbitrary", "lazy_static", @@ -114,9 +112,9 @@ checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" [[package]] name = "libc" -version = "0.2.66" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" +checksum = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" [[package]] name = "memmap" @@ -160,9 +158,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "1.1.2" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f747c06d9f3b2ad387ac881b9667298c81b1243aa9833f086e05996937c35507" +checksum = "f509c5e67ca0605ee17dcd3f91ef41cadd685c75a298fb6261b781a5acb3f910" dependencies = [ "arrayvec", "bitvec", @@ -173,9 +171,9 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "1.1.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34e513ff3e406f3ede6796dcdc83d0b32ffb86668cea1ccf7363118abeb00476" +checksum = "5a0ec292e92e8ec7c58e576adacc1e3f399c597c8f263c42f18420abe58e7245" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -211,18 +209,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.8" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548" +checksum = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" dependencies = [ "unicode-xid", ] [[package]] name = "quote" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" dependencies = [ "proc-macro2", ] @@ -296,7 +294,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "integer-sqrt", "num-traits", @@ -319,7 +317,7 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "proc-macro2", "quote", @@ -328,7 +326,7 @@ dependencies = [ [[package]] name = "sp-std" -version = "2.0.0" +version = "2.0.0-alpha.3" [[package]] name = "static_assertions" @@ -338,9 +336,9 @@ checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" [[package]] name = "syn" 
-version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5" +checksum = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" dependencies = [ "proc-macro2", "quote", diff --git a/primitives/arithmetic/fuzzer/Cargo.toml b/primitives/arithmetic/fuzzer/Cargo.toml index 19d677f744502f1fdceb72920cf0d35a97760427..26b5f8c27b5085fd615dce5af70939e994c76767 100644 --- a/primitives/arithmetic/fuzzer/Cargo.toml +++ b/primitives/arithmetic/fuzzer/Cargo.toml @@ -1,23 +1,32 @@ [package] name = "sp-arithmetic-fuzzer" -version = "2.0.0" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Fuzzer for fixed point arithmetic primitives." +documentation = "https://docs.rs/sp-arithmetic-fuzzer" [dependencies] -sp-arithmetic = { version = "2.0.0", path = ".." } +sp-arithmetic = { version = "2.0.0-alpha.5", path = ".." } honggfuzz = "0.5" -primitive-types = "0.6.2" +primitive-types = "0.7.0" num-bigint = "0.2" num-traits = "0.2" -[workspace] - [[bin]] name = "biguint" path = "src/biguint.rs" +[[bin]] +name = "per_thing_rational" +path = "src/per_thing_rational.rs" + [[bin]] name = "rational128" path = "src/rational128.rs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs new file mode 100644 index 0000000000000000000000000000000000000000..c2dda3de2299cb713114a856c6eafbedf2d91be2 --- /dev/null +++ b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs @@ -0,0 +1,122 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! # Running +//! Running this fuzzer can be done with `cargo hfuzz run per_thing_rational`. `honggfuzz` CLI options can +//! be used by setting `HFUZZ_RUN_ARGS`, such as `-n 4` to use 4 threads. +//! +//! # Debugging a panic +//! Once a panic is found, it can be debugged with +//! `cargo hfuzz run-debug per_thing_rational hfuzz_workspace/per_thing_rational/*.fuzz`. 
+ +use honggfuzz::fuzz; +use sp_arithmetic::{ + PerThing, PerU16, Percent, Perbill, Perquintill, traits::SaturatedConversion, +}; + +fn main() { + loop { + fuzz!(| + data: ((u16, u16), (u32, u32), (u64, u64)) + | { + + let (u16_pair, u32_pair, u64_pair) = data; + + // peru16 + let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); + let ratio = PerU16::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); + let ratio = PerU16::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); + let ratio = PerU16::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + + // percent + let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); + let ratio = Percent::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + + let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); + let ratio = Percent::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + + let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); + let ratio = Percent::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + + // perbill + let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); + let ratio = Perbill::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), + 100, + ); + + let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); + let ratio = Perbill::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), + 100, + ); + + // perquintillion + let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); + let ratio = Perquintill::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Perquintill::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1000, + ); + + }) + } +} + +fn assert_per_thing_equal_error(a: T, b: T, err: u128) { + let a_abs = a.deconstruct().saturated_into::(); + let b_abs = b.deconstruct().saturated_into::(); + let diff = a_abs.max(b_abs) - a_abs.min(b_abs); + dbg!(&diff); + assert!(diff <= err, "{:?} !~ {:?}", a, b); +} diff --git a/primitives/arithmetic/src/fixed64.rs b/primitives/arithmetic/src/fixed64.rs index eea1ab68a3bb2272714fb0e3e506201634ffcaa0..6b399b6aa5106d3889c1152c8cb1e7fc38a14585 100644 --- a/primitives/arithmetic/src/fixed64.rs +++ b/primitives/arithmetic/src/fixed64.rs @@ -110,12 +110,18 @@ impl Saturating for Fixed64 { fn saturating_add(self, rhs: Self) -> Self { Self(self.0.saturating_add(rhs.0)) } + fn saturating_mul(self, rhs: Self) -> Self { Self(self.0.saturating_mul(rhs.0) / DIV) } + fn 
saturating_sub(self, rhs: Self) -> Self { Self(self.0.saturating_sub(rhs.0)) } + + fn saturating_pow(self, exp: usize) -> Self { + Self(self.0.saturating_pow(exp as u32)) + } } /// Note that this is a standard, _potentially-panicking_, implementation. Use `Saturating` trait diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index c2feae00b74ff9a3e0d016558c2dfbaee2e1dffd..f6d8b53e3499b340b27285964e8c366be39ca158 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -19,7 +19,7 @@ #![cfg_attr(not(feature = "std"), no_std)] /// Copied from `sp-runtime` and documented there. -#[cfg(test)] +#[macro_export] macro_rules! assert_eq_error_rate { ($x:expr, $y:expr, $error:expr $(,)?) => { assert!( @@ -40,5 +40,17 @@ mod fixed64; mod rational128; pub use fixed64::Fixed64; -pub use per_things::{PerThing, Percent, Permill, Perbill, Perquintill}; +pub use per_things::{PerThing, Percent, PerU16, Permill, Perbill, Perquintill}; pub use rational128::Rational128; + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn peru16_rational_does_not_overflow() { + // A historical example that will panic only for per_thing type that are created with + // maximum capacity of their type, e.g. PerU16. + let _ = PerU16::from_rational_approximation(17424870u32, 17424870); + } +} diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index cbb804baf5d49e97ce7d9817b7169dffe6e87643..ad529fbf32e249ef1fb0e5838c30fce52aab4b25 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -17,42 +17,160 @@ #[cfg(feature = "std")] use serde::{Serialize, Deserialize}; -use sp_std::{ops, prelude::*, convert::TryInto}; -use codec::{Encode, Decode, CompactAs}; +use sp_std::{ops, fmt, prelude::*, convert::TryInto}; +use codec::{Encode, CompactAs}; use crate::traits::{ - SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, + SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, }; use sp_debug_derive::RuntimeDebug; /// Something that implements a fixed point ration with an arbitrary granularity `X`, as _parts per /// `X`_. -pub trait PerThing: Sized + Saturating + Copy { +pub trait PerThing: + Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug +{ /// The data type used to build this per-thingy. - type Inner: BaseArithmetic + Copy; + type Inner: BaseArithmetic + Copy + fmt::Debug; - /// accuracy of this type - const ACCURACY: Self::Inner; + /// A data type larger than `Self::Inner`, used to avoid overflow in some computations. + /// It must be able to compute `ACCURACY^2`. + type Upper: BaseArithmetic + Copy + From + TryInto + fmt::Debug; - /// NoThing - fn zero() -> Self; + /// The accuracy of this type. + const ACCURACY: Self::Inner; - /// `true` if this is nothing. - fn is_zero(&self) -> bool; + /// Equivalent to `Self::from_parts(0)`. + fn zero() -> Self { Self::from_parts(Self::Inner::zero()) } - /// Everything. - fn one() -> Self; + /// Return `true` if this is nothing. + fn is_zero(&self) -> bool { self.deconstruct() == Self::Inner::zero() } - /// Consume self and deconstruct into a raw numeric type. - fn deconstruct(self) -> Self::Inner; + /// Equivalent to `Self::from_parts(Self::ACCURACY)`. + fn one() -> Self { Self::from_parts(Self::ACCURACY) } - /// From an explicitly defined number of parts per maximum of the type. 
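Two small but useful API changes land in the `lib.rs` hunk above: `assert_eq_error_rate!` is now `#[macro_export]`ed and `PerU16` joins the crate's re-exports. A hedged sketch of how a downstream test could use both; the concrete values are illustrative, not taken from the patch.

```rust
// Sketch: compare a rational approximation against the float path within one part.
use sp_arithmetic::{assert_eq_error_rate, PerU16};

fn main() {
    let a = PerU16::from_rational_approximation(1u32, 3u32);
    let b = PerU16::from_fraction(1.0 / 3.0);
    // The two routes may differ by one part of 65535 due to truncation.
    assert_eq_error_rate!(a.deconstruct(), b.deconstruct(), 1);
}
```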
- fn from_parts(parts: Self::Inner) -> Self; + /// Return `true` if this is one. + fn is_one(&self) -> bool { self.deconstruct() == Self::ACCURACY } - /// Converts a percent into `Self`. Equal to `x / 100`. - fn from_percent(x: Self::Inner) -> Self; + /// Build this type from a percent. Equivalent to `Self::from_parts(x * Self::ACCURACY / 100)` + /// but more accurate. + fn from_percent(x: Self::Inner) -> Self { + let a = x.min(100.into()); + let b = Self::ACCURACY; + // if Self::ACCURACY % 100 > 0 then we need the correction for accuracy + let c = rational_mul_correction::(b, a, 100.into(), Rounding::Nearest); + Self::from_parts(a / 100.into() * b + c) + } /// Return the product of multiplication of this value by itself. - fn square(self) -> Self; + fn square(self) -> Self { + let p = Self::Upper::from(self.deconstruct()); + let q = Self::Upper::from(Self::ACCURACY); + Self::from_rational_approximation(p * p, q * q) + } + + /// Multiplication that always rounds down to a whole number. The standard `Mul` rounds to the + /// nearest whole number. + /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // round to nearest + /// assert_eq!(Percent::from_percent(34) * 10u64, 3); + /// assert_eq!(Percent::from_percent(36) * 10u64, 4); + /// + /// // round down + /// assert_eq!(Percent::from_percent(34).mul_floor(10u64), 3); + /// assert_eq!(Percent::from_percent(36).mul_floor(10u64), 3); + /// # } + /// ``` + fn mul_floor(self, b: N) -> N + where N: Clone + From + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + { + overflow_prune_mul::(b, self.deconstruct(), Rounding::Down) + } + + /// Multiplication that always rounds the result up to a whole number. The standard `Mul` + /// rounds to the nearest whole number. + /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // round to nearest + /// assert_eq!(Percent::from_percent(34) * 10u64, 3); + /// assert_eq!(Percent::from_percent(36) * 10u64, 4); + /// + /// // round up + /// assert_eq!(Percent::from_percent(34).mul_ceil(10u64), 4); + /// assert_eq!(Percent::from_percent(36).mul_ceil(10u64), 4); + /// # } + /// ``` + fn mul_ceil(self, b: N) -> N + where N: Clone + From + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + { + overflow_prune_mul::(b, self.deconstruct(), Rounding::Up) + } + + /// Saturating multiplication by the reciprocal of `self`. The result is rounded to the + /// nearest whole number and saturates at the numeric bounds instead of overflowing. + /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// assert_eq!(Percent::from_percent(50).saturating_reciprocal_mul(10u64), 20); + /// # } + /// ``` + fn saturating_reciprocal_mul(self, b: N) -> N + where N: Clone + From + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + Saturating + { + saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Nearest) + } + + /// Saturating multiplication by the reciprocal of `self`. The result is rounded down to the + /// nearest whole number and saturates at the numeric bounds instead of overflowing. 
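The doc-tests above already demonstrate each rounding variant individually; for convenience, here is the same behaviour collected into one runnable program. The values are copied from the doc examples in this hunk, so nothing new is assumed beyond the `Percent` type.

```rust
use sp_arithmetic::Percent;

fn main() {
    // `*` rounds to the nearest whole number ...
    assert_eq!(Percent::from_percent(34) * 10u64, 3);
    assert_eq!(Percent::from_percent(36) * 10u64, 4);
    // ... `mul_floor` always rounds down, `mul_ceil` always rounds up ...
    assert_eq!(Percent::from_percent(36).mul_floor(10u64), 3);
    assert_eq!(Percent::from_percent(34).mul_ceil(10u64), 4);
    // ... and the reciprocal variant divides by `self` instead of multiplying.
    assert_eq!(Percent::from_percent(50).saturating_reciprocal_mul(10u64), 20);
}
```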
+ /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // round to nearest + /// assert_eq!(Percent::from_percent(60).saturating_reciprocal_mul(10u64), 17); + /// // round down + /// assert_eq!(Percent::from_percent(60).saturating_reciprocal_mul_floor(10u64), 16); + /// # } + /// ``` + fn saturating_reciprocal_mul_floor(self, b: N) -> N + where N: Clone + From + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + Saturating + { + saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Down) + } + + /// Saturating multiplication by the reciprocal of `self`. The result is rounded up to the + /// nearest whole number and saturates at the numeric bounds instead of overflowing. + /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // round to nearest + /// assert_eq!(Percent::from_percent(61).saturating_reciprocal_mul(10u64), 16); + /// // round up + /// assert_eq!(Percent::from_percent(61).saturating_reciprocal_mul_ceil(10u64), 17); + /// # } + /// ``` + fn saturating_reciprocal_mul_ceil(self, b: N) -> N + where N: Clone + From + UniqueSaturatedInto + ops::Rem + + ops::Div + ops::Mul + ops::Add + Saturating + { + saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Up) + } + + /// Consume self and return the number of parts per thing. + fn deconstruct(self) -> Self::Inner; + + /// Build this type from a number of parts per thing. + fn from_parts(parts: Self::Inner) -> Self; /// Converts a fraction into `Self`. #[cfg(feature = "std")] @@ -63,88 +181,190 @@ pub trait PerThing: Sized + Saturating + Copy { /// The computation of this approximation is performed in the generic type `N`. Given /// `M` as the data type that can hold the maximum value of this per-thing (e.g. u32 for /// perbill), this can only work if `N == M` or `N: From + TryInto`. + /// + /// Note that this always rounds _down_, i.e. + /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // 989/100 is technically closer to 99%. + /// assert_eq!( + /// Percent::from_rational_approximation(989, 1000), + /// Percent::from_parts(98), + /// ); + /// # } + /// ``` fn from_rational_approximation(p: N, q: N) -> Self - where N: Clone + Ord + From + TryInto + ops::Div; + where N: Clone + Ord + From + TryInto + TryInto + + ops::Div + ops::Rem + ops::Add; +} + +/// The rounding method to use. +/// +/// `PerThing`s are unsigned so `Up` means towards infinity and `Down` means towards zero. +/// `Nearest` will round an exact half down. +enum Rounding { + Up, + Down, + Nearest, +} + +/// Saturating reciprocal multiplication. Compute `x / self`, saturating at the numeric +/// bounds instead of overflowing. +fn saturating_reciprocal_mul( + x: N, + part: P::Inner, + rounding: Rounding, +) -> N +where + N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem + Saturating, + P: PerThing, +{ + let maximum: N = P::ACCURACY.into(); + let c = rational_mul_correction::( + x.clone(), + P::ACCURACY, + part, + rounding, + ); + (x / part.into()).saturating_mul(maximum).saturating_add(c) +} + +/// Overflow-prune multiplication. Accurately multiply a value by `self` without overflowing. 
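Why `overflow_prune_mul` matters in practice: the operand is split into a whole part and a remainder, and the remainder is corrected separately, so multiplying even a maximal balance by a per-thing cannot overflow the balance type. A minimal sketch with my own example values, assuming only `Perbill` and the `Mul` impl that is rewritten later in this hunk.

```rust
use sp_arithmetic::Perbill;

fn main() {
    let fee = Perbill::from_percent(25); // 250_000_000 parts per billion
    let balance = u128::max_value();
    // Computing `balance * 250_000_000 / 1_000_000_000` directly would overflow u128;
    // splitting into `(balance / 10^9) * parts + correction` does not.
    let charged = fee * balance;
    assert!(charged <= balance);
    assert_eq!(fee * 1_000_000_000u128, 250_000_000u128);
}
```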
+fn overflow_prune_mul( + x: N, + part: P::Inner, + rounding: Rounding, +) -> N +where + N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem, + P: PerThing, +{ + let maximum: N = P::ACCURACY.into(); + let part_n: N = part.into(); + let c = rational_mul_correction::( + x.clone(), + part, + P::ACCURACY, + rounding, + ); + (x / maximum) * part_n + c +} + +/// Compute the error due to integer division in the expression `x / denom * numer`. +/// +/// Take the remainder of `x / denom` and multiply by `numer / denom`. The result can be added +/// to `x / denom * numer` for an accurate result. +fn rational_mul_correction( + x: N, + numer: P::Inner, + denom: P::Inner, + rounding: Rounding, +) -> N +where + N: From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem, + P: PerThing, +{ + let numer_upper = P::Upper::from(numer); + let denom_n = N::from(denom); + let denom_upper = P::Upper::from(denom); + let rem = x.rem(denom_n); + // `rem` is less than `denom`, which fits in `P::Inner`. + let rem_inner = rem.saturated_into::(); + // `P::Upper` always fits `P::Inner::max_value().pow(2)`, thus it fits `rem * numer`. + let rem_mul_upper = P::Upper::from(rem_inner) * numer_upper; + // `rem` is less than `denom`, so `rem * numer / denom` is less than `numer`, which fits in + // `P::Inner`. + let mut rem_mul_div_inner = (rem_mul_upper / denom_upper).saturated_into::(); + match rounding { + // Already rounded down + Rounding::Down => {}, + // Round up if the fractional part of the result is non-zero. + Rounding::Up => if rem_mul_upper % denom_upper > 0.into() { + // `rem * numer / denom` is less than `numer`, so this will not overflow. + rem_mul_div_inner = rem_mul_div_inner + 1.into(); + }, + // Round up if the fractional part of the result is greater than a half. An exact half is + // rounded down. + Rounding::Nearest => if rem_mul_upper % denom_upper > denom_upper / 2.into() { + // `rem * numer / denom` is less than `numer`, so this will not overflow. + rem_mul_div_inner = rem_mul_div_inner + 1.into(); + }, + } + rem_mul_div_inner.into() } macro_rules! implement_per_thing { - ($name:ident, $test_mod:ident, [$($test_units:tt),+], $max:tt, $type:ty, $upper_type:ty, $title:expr $(,)?) => { - /// A fixed point representation of a number between in the range [0, 1]. + ( + $name:ident, + $test_mod:ident, + [$($test_units:tt),+], + $max:tt, + $type:ty, + $upper_type:ty, + $title:expr $(,)? + ) => { + /// A fixed point representation of a number in the range [0, 1]. /// #[doc = $title] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] - #[derive(Encode, Decode, Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, RuntimeDebug, CompactAs)] + #[derive(Encode, Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, + RuntimeDebug, CompactAs)] pub struct $name($type); impl PerThing for $name { type Inner = $type; + type Upper = $upper_type; - /// The accuracy of this type. const ACCURACY: Self::Inner = $max; - /// Nothing. - fn zero() -> Self { Self(0) } - - /// `true` if this is nothing. - fn is_zero(&self) -> bool { self.0 == 0 } - - /// Everything. - fn one() -> Self { Self($max) } - - /// Consume self and deconstruct into a raw numeric type. + /// Consume self and return the number of parts per thing. fn deconstruct(self) -> Self::Inner { self.0 } - /// From an explicitly defined number of parts per maximum of the type. - fn from_parts(parts: Self::Inner) -> Self { - Self([parts, $max][(parts > $max) as usize]) - } - - /// Converts a percent into `Self`. 
Equal to `x / 100`. - fn from_percent(x: Self::Inner) -> Self { - Self([x, 100][(x > 100) as usize] * ($max / 100)) - } + /// Build this type from a number of parts per thing. + fn from_parts(parts: Self::Inner) -> Self { Self(parts.min($max)) } - /// Return the product of multiplication of this value by itself. - fn square(self) -> Self { - // both can be safely casted and multiplied. - let p: $upper_type = self.0 as $upper_type * self.0 as $upper_type; - let q: $upper_type = <$upper_type>::from($max) * <$upper_type>::from($max); - Self::from_rational_approximation(p, q) - } - - /// Converts a fraction into `Self`. #[cfg(feature = "std")] - fn from_fraction(x: f64) -> Self { Self((x * ($max as f64)) as Self::Inner) } + fn from_fraction(x: f64) -> Self { + Self::from_parts((x * $max as f64) as Self::Inner) + } - /// Approximate the fraction `p/q` into a per-thing fraction. This will never overflow. - /// - /// The computation of this approximation is performed in the generic type `N`. Given - /// `M` as the data type that can hold the maximum value of this per-thing (e.g. u32 for - /// perbill), this can only work if `N == M` or `N: From + TryInto`. fn from_rational_approximation(p: N, q: N) -> Self - where N: Clone + Ord + From + TryInto + ops::Div + where N: Clone + Ord + From + TryInto + TryInto + + ops::Div + ops::Rem + ops::Add { + let div_ceil = |x: N, f: N| -> N { + let mut o = x.clone() / f.clone(); + let r = x.rem(f.clone()); + if r > N::from(0) { + o = o + N::from(1); + } + o + }; + // q cannot be zero. - let q = q.max((1 as Self::Inner).into()); + let q: N = q.max((1 as Self::Inner).into()); // p should not be bigger than q. - let p = p.min(q.clone()); + let p: N = p.min(q.clone()); - let factor = (q.clone() / $max.into()).max((1 as Self::Inner).into()); + let factor: N = div_ceil(q.clone(), $max.into()).max((1 as Self::Inner).into()); - // q cannot overflow: (q / (q/$max)) < 2 * $max. p < q hence p also cannot overflow. - // this implies that Self::Inner must be able to fit 2 * $max. - let q_reduce: Self::Inner = (q / factor.clone()) + // q cannot overflow: (q / (q/$max)) < $max. p < q hence p also cannot overflow. + let q_reduce: $type = (q.clone() / factor.clone()) .try_into() .map_err(|_| "Failed to convert") .expect( - "q / (q/$max) < (2 * $max). Macro prevents any type being created that \ + "q / ceil(q/$max) < $max. Macro prevents any type being created that \ does not satisfy this; qed" ); - let p_reduce: Self::Inner = (p / factor.clone()) + let p_reduce: $type = (p / factor) .try_into() .map_err(|_| "Failed to convert") .expect( - "q / (q/$max) < (2 * $max). Macro prevents any type being created that \ + "q / ceil(q/$max) < $max. Macro prevents any type being created that \ does not satisfy this; qed" ); @@ -159,11 +379,14 @@ macro_rules! implement_per_thing { } } - /// Implement const functions impl $name { /// From an explicitly defined number of parts per maximum of the type. /// /// This can be called at compile time. + // needed only for peru16. Since peru16 is the only type in which $max == + // $type::max_value(), rustc is being a smart-a** here by warning that the comparison + // is not needed. + #[allow(unused_comparisons)] pub const fn from_parts(parts: $type) -> Self { Self([parts, $max][(parts > $max) as usize]) } @@ -172,26 +395,110 @@ macro_rules! implement_per_thing { /// /// This can be created at compile time. 
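A few consequences of the reworked `from_rational_approximation` are worth keeping in mind while reviewing: it still rounds down, it clamps `p` to `q`, and with the new `div_ceil` factor the `PerU16` case from the unit test added in `lib.rs` no longer panics. A sketch follows; values are either taken from this diff or marked as illustrative.

```rust
use sp_arithmetic::{PerU16, Percent};

fn main() {
    // From the doc-test above: 989/1000 is closer to 99%, but the result rounds down.
    assert_eq!(Percent::from_rational_approximation(989u32, 1000u32), Percent::from_parts(98));
    // p is capped at q, so a ratio above one saturates at 100% (illustrative values).
    assert_eq!(Percent::from_rational_approximation(7u32, 5u32), Percent::one());
    // The historical overflow case exercised by the new `peru16_rational_does_not_overflow` test.
    let _ = PerU16::from_rational_approximation(17424870u32, 17424870u32);
}
```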
pub const fn from_percent(x: $type) -> Self { - Self([x, 100][(x > 100) as usize] * ($max / 100)) + Self(([x, 100][(x > 100) as usize] as $upper_type * $max as $upper_type / 100) as $type) } - /// Everything. - /// - /// To avoid having to import `PerThing` when one needs to be used in test mocks. - #[cfg(feature = "std")] + /// See [`PerThing::one`]. pub fn one() -> Self { ::one() } + + /// See [`PerThing::is_one`]. + pub fn is_one(&self) -> bool { + PerThing::is_one(self) + } + + /// See [`PerThing::zero`]. + pub fn zero() -> Self { + ::zero() + } + + /// See [`PerThing::is_zero`]. + pub fn is_zero(&self) -> bool { + PerThing::is_zero(self) + } + + /// See [`PerThing::deconstruct`]. + pub fn deconstruct(self) -> $type { + PerThing::deconstruct(self) + } + + /// See [`PerThing::square`]. + pub fn square(self) -> Self { + PerThing::square(self) + } + + /// See [`PerThing::from_fraction`]. + #[cfg(feature = "std")] + pub fn from_fraction(x: f64) -> Self { + ::from_fraction(x) + } + + /// See [`PerThing::from_rational_approximation`]. + pub fn from_rational_approximation(p: N, q: N) -> Self + where N: Clone + Ord + From<$type> + TryInto<$type> + + TryInto<$upper_type> + ops::Div + ops::Rem + + ops::Add { + ::from_rational_approximation(p, q) + } + + /// See [`PerThing::mul_floor`]. + pub fn mul_floor(self, b: N) -> N + where N: Clone + From<$type> + UniqueSaturatedInto<$type> + + ops::Rem + ops::Div + ops::Mul + + ops::Add { + PerThing::mul_floor(self, b) + } + + /// See [`PerThing::mul_ceil`]. + pub fn mul_ceil(self, b: N) -> N + where N: Clone + From<$type> + UniqueSaturatedInto<$type> + + ops::Rem + ops::Div + ops::Mul + + ops::Add { + PerThing::mul_ceil(self, b) + } + + /// See [`PerThing::saturating_reciprocal_mul`]. + pub fn saturating_reciprocal_mul(self, b: N) -> N + where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + + ops::Div + ops::Mul + ops::Add + + Saturating { + PerThing::saturating_reciprocal_mul(self, b) + } + + /// See [`PerThing::saturating_reciprocal_mul_floor`]. + pub fn saturating_reciprocal_mul_floor(self, b: N) -> N + where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + + ops::Div + ops::Mul + ops::Add + + Saturating { + PerThing::saturating_reciprocal_mul_floor(self, b) + } + + /// See [`PerThing::saturating_reciprocal_mul_ceil`]. + pub fn saturating_reciprocal_mul_ceil(self, b: N) -> N + where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem + + ops::Div + ops::Mul + ops::Add + + Saturating { + PerThing::saturating_reciprocal_mul_ceil(self, b) + } } impl Saturating for $name { + /// Saturating addition. Compute `self + rhs`, saturating at the numeric bounds instead of + /// overflowing. This operation is lossless if it does not saturate. fn saturating_add(self, rhs: Self) -> Self { // defensive-only: since `$max * 2 < $type::max_value()`, this can never overflow. Self::from_parts(self.0.saturating_add(rhs.0)) } + + /// Saturating subtraction. Compute `self - rhs`, saturating at the numeric bounds instead of + /// overflowing. This operation is lossless if it does not saturate. fn saturating_sub(self, rhs: Self) -> Self { Self::from_parts(self.0.saturating_sub(rhs.0)) } + + /// Saturating multiply. Compute `self * rhs`, saturating at the numeric bounds instead of + /// overflowing. This operation is lossy. fn saturating_mul(self, rhs: Self) -> Self { let a = self.0 as $upper_type; let b = rhs.0 as $upper_type; @@ -200,6 +507,53 @@ macro_rules! implement_per_thing { // This will always fit into $type. 
Self::from_parts(parts as $type) } + + /// Saturating exponentiation. Computes `self.pow(exp)`, saturating at the numeric + /// bounds instead of overflowing. This operation is lossy. + fn saturating_pow(self, exp: usize) -> Self { + if self.is_zero() || self.is_one() { + self + } else { + let p = <$name as PerThing>::Upper::from(self.deconstruct()); + let q = <$name as PerThing>::Upper::from(Self::ACCURACY); + let mut s = Self::one(); + for _ in 0..exp { + if s.is_zero() { + break; + } else { + // x^2 always fits in Self::Upper if x fits in Self::Inner. + // Verified by a test. + s = Self::from_rational_approximation( + <$name as PerThing>::Upper::from(s.deconstruct()) * p, + q * q, + ); + } + } + s + } + } + } + + impl codec::Decode for $name { + fn decode(input: &mut I) -> Result { + let inner = <$type as codec::Decode>::decode(input)?; + + if inner <= ::ACCURACY { + Ok(Self(inner)) + } else { + Err("Value is greater than allowed maximum!".into()) + } + } + } + + impl crate::traits::Bounded for $name { + fn min_value() -> Self { + ::zero() + } + + fn max_value() -> Self { + ::one() + } } impl ops::Div for $name { @@ -212,9 +566,9 @@ macro_rules! implement_per_thing { } } - /// Overflow-prune multiplication. + /// Non-overflow multiplication. /// - /// tailored to be used with a balance type. + /// This is tailored to be used with a balance type. impl ops::Mul for $name where N: Clone + From<$type> + UniqueSaturatedInto<$type> + ops::Rem @@ -222,36 +576,7 @@ macro_rules! implement_per_thing { { type Output = N; fn mul(self, b: N) -> Self::Output { - let maximum: N = $max.into(); - let upper_max: $upper_type = $max.into(); - let part: N = self.0.into(); - - let rem_multiplied_divided = { - let rem = b.clone().rem(maximum.clone()); - - // `rem_sized` is inferior to $max, thus it fits into $type. This is assured by - // a test. - let rem_sized = rem.saturated_into::<$type>(); - - // `self` and `rem_sized` are inferior to $max, thus the product is less than - // $max^2 and fits into $upper_type. This is assured by a test. - let rem_multiplied_upper = rem_sized as $upper_type * self.0 as $upper_type; - - // `rem_multiplied_upper` is less than $max^2 therefore divided by $max it fits - // in $type. remember that $type always fits $max. - let mut rem_multiplied_divided_sized = - (rem_multiplied_upper / upper_max) as $type; - // fix a tiny rounding error - if rem_multiplied_upper % upper_max > upper_max / 2 { - rem_multiplied_divided_sized += 1; - } - - // `rem_multiplied_divided_sized` is inferior to b, thus it can be converted - // back to N type - rem_multiplied_divided_sized.into() - }; - - (b / maximum) * part + rem_multiplied_divided + overflow_prune_mul::(b, self.deconstruct(), Rounding::Nearest) } } @@ -261,20 +586,25 @@ macro_rules! implement_per_thing { use super::{$name, Saturating, RuntimeDebug, PerThing}; use crate::traits::Zero; - #[test] fn macro_expanded_correctly() { - // needed for the `from_percent` to work. - assert!($max >= 100); - assert!($max % 100 == 0); + // needed for the `from_percent` to work. UPDATE: this is no longer needed; yet note + // that tests that use percentage or fractions such as $name::from_fraction(0.2) to + // create values will most likely be inaccurate when used with per_things that are + // not multiples of 100. 
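The derive list dropped `Decode` and the manual impl above enforces `inner <= ACCURACY`, so an out-of-range encoding can no longer smuggle in an invalid per-thing. A short sketch of the observable effect, assuming a crate that depends on `parity-scale-codec` renamed to `codec`, matching the workspace convention.

```rust
use codec::{Decode, Encode};
use sp_arithmetic::Percent;

fn main() {
    // A valid value round-trips as before.
    let ok = Percent::from_percent(42).encode();
    assert_eq!(Percent::decode(&mut &ok[..]).unwrap(), Percent::from_percent(42));

    // 200 is above Percent::ACCURACY (100), so these bytes must fail to decode.
    let bad = 200u8.encode();
    assert!(Percent::decode(&mut &bad[..]).is_err());
}
```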
+ // assert!($max >= 100); + // assert!($max % 100 == 0); // needed for `from_rational_approximation` - assert!(2 * $max < <$type>::max_value()); + assert!(2 * ($max as $upper_type) < <$upper_type>::max_value()); assert!(<$upper_type>::from($max) < <$upper_type>::max_value()); // for something like percent they can be the same. assert!((<$type>::max_value() as $upper_type) <= <$upper_type>::max_value()); assert!(<$upper_type>::from($max).checked_mul($max.into()).is_some()); + + // make sure saturating_pow won't overflow the upper type + assert!(<$upper_type>::from($max) * <$upper_type>::from($max) < <$upper_type>::max_value()); } #[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug)] @@ -298,7 +628,7 @@ macro_rules! implement_per_thing { (63, 1), (64, 2), (65, 2), - (<$type>::max_value(), <$type>::max_value().encode().len() + 1) + // (<$type>::max_value(), <$type>::max_value().encode().len() + 1) ]; for &(n, l) in &tests { let compact: codec::Compact<$name> = $name(n).into(); @@ -311,39 +641,100 @@ macro_rules! implement_per_thing { } } + #[test] + fn fail_on_invalid_encoded_value() { + let value = <$upper_type>::from($max) * 2; + let casted = value as $type; + let encoded = casted.encode(); + + // For types where `$max == $type::maximum()` we can not + if <$upper_type>::from(casted) == value { + assert_eq!( + $name::decode(&mut &encoded[..]), + Err("Value is greater than allowed maximum!".into()), + ); + } + } + #[test] fn per_thing_api_works() { // some really basic stuff assert_eq!($name::zero(), $name::from_parts(Zero::zero())); assert_eq!($name::one(), $name::from_parts($max)); assert_eq!($name::ACCURACY, $max); + assert_eq!($name::from_percent(0), $name::from_parts(Zero::zero())); assert_eq!($name::from_percent(10), $name::from_parts($max / 10)); assert_eq!($name::from_percent(100), $name::from_parts($max)); + assert_eq!($name::from_percent(200), $name::from_parts($max)); + + assert_eq!($name::from_fraction(0.0), $name::from_parts(Zero::zero())); + assert_eq!($name::from_fraction(0.1), $name::from_parts($max / 10)); + assert_eq!($name::from_fraction(1.0), $name::from_parts($max)); + } + + macro_rules! u256ify { + ($val:expr) => { + Into::::into($val) + }; } macro_rules! 
per_thing_mul_test { ($num_type:tt) => { // multiplication from all sort of from_percent assert_eq!( - $name::from_percent(100) * $num_type::max_value(), + $name::from_fraction(1.0) * $num_type::max_value(), $num_type::max_value() ); - assert_eq_error_rate!( - $name::from_percent(99) * $num_type::max_value(), - ((Into::::into($num_type::max_value()) * 99u32) / 100u32).as_u128() as $num_type, - 1, - ); - assert_eq!( - $name::from_percent(50) * $num_type::max_value(), - $num_type::max_value() / 2, - ); - assert_eq_error_rate!( - $name::from_percent(1) * $num_type::max_value(), - $num_type::max_value() / 100, - 1, - ); - assert_eq!($name::from_percent(0) * $num_type::max_value(), 0); + if $max % 100 == 0 { + assert_eq_error_rate!( + $name::from_percent(99) * $num_type::max_value(), + ((Into::::into($num_type::max_value()) * 99u32) / 100u32).as_u128() as $num_type, + 1, + ); + assert_eq!( + $name::from_fraction(0.5) * $num_type::max_value(), + $num_type::max_value() / 2, + ); + assert_eq_error_rate!( + $name::from_percent(1) * $num_type::max_value(), + $num_type::max_value() / 100, + 1, + ); + } else { + assert_eq!( + $name::from_fraction(0.99) * <$num_type>::max_value(), + ( + ( + u256ify!($name::from_fraction(0.99).0) * + u256ify!(<$num_type>::max_value()) / + u256ify!($max) + ).as_u128() + ) as $num_type, + ); + assert_eq!( + $name::from_fraction(0.50) * <$num_type>::max_value(), + ( + ( + u256ify!($name::from_fraction(0.50).0) * + u256ify!(<$num_type>::max_value()) / + u256ify!($max) + ).as_u128() + ) as $num_type, + ); + assert_eq!( + $name::from_fraction(0.01) * <$num_type>::max_value(), + ( + ( + u256ify!($name::from_fraction(0.01).0) * + u256ify!(<$num_type>::max_value()) / + u256ify!($max) + ).as_u128() + ) as $num_type, + ); + } + + assert_eq!($name::from_fraction(0.0) * $num_type::max_value(), 0); // // multiplication with bounds assert_eq!($name::one() * $num_type::max_value(), $num_type::max_value()); @@ -356,17 +747,20 @@ macro_rules! implement_per_thing { use primitive_types::U256; // accuracy test - assert_eq!($name::from_rational_approximation(1 as $type, 3) * 30 as $type, 10); + assert_eq!( + $name::from_rational_approximation(1 as $type, 3) * 30 as $type, + 10, + ); $(per_thing_mul_test!($test_units);)* } #[test] fn per_thing_mul_rounds_to_nearest_number() { - assert_eq!($name::from_percent(33) * 10u64, 3); - assert_eq!($name::from_percent(34) * 10u64, 3); - assert_eq!($name::from_percent(35) * 10u64, 3); - assert_eq!($name::from_percent(36) * 10u64, 4); + assert_eq!($name::from_fraction(0.33) * 10u64, 3); + assert_eq!($name::from_fraction(0.34) * 10u64, 3); + assert_eq!($name::from_fraction(0.35) * 10u64, 3); + assert_eq!($name::from_fraction(0.36) * 10u64, 4); } #[test] @@ -398,31 +792,32 @@ macro_rules! implement_per_thing { ); assert_eq!( $name::from_rational_approximation(1 as $num_type, 10), - $name::from_percent(10), + $name::from_fraction(0.10), ); assert_eq!( $name::from_rational_approximation(1 as $num_type, 4), - $name::from_percent(25), + $name::from_fraction(0.25), ); assert_eq!( $name::from_rational_approximation(1 as $num_type, 4), $name::from_rational_approximation(2 as $num_type, 8), ); // no accurate anymore but won't overflow. 
- assert_eq!( + assert_eq_error_rate!( $name::from_rational_approximation( $num_type::max_value() - 1, $num_type::max_value() - ), - $name::one(), + ).0 as $upper_type, + $name::one().0 as $upper_type, + 2, ); assert_eq_error_rate!( $name::from_rational_approximation( $num_type::max_value() / 3, $num_type::max_value() - ).0, - $name::from_parts($max / 3).0, - 2 + ).0 as $upper_type, + $name::from_parts($max / 3).0 as $upper_type, + 2, ); assert_eq!( $name::from_rational_approximation(1, $num_type::max_value()), @@ -436,13 +831,14 @@ macro_rules! implement_per_thing { // This is just to make sure something like Percent which _might_ get built from a // u8 does not overflow in the context of this test. let max_value = <$upper_type>::from($max); + // almost at the edge assert_eq!( - $name::from_rational_approximation($max - 1, $max + 1), + $name::from_rational_approximation(max_value - 1, max_value + 1), $name::from_parts($max - 2), ); assert_eq!( - $name::from_rational_approximation(1, $max-1), + $name::from_rational_approximation(1, $max - 1), $name::from_parts(1), ); assert_eq!( @@ -450,76 +846,83 @@ macro_rules! implement_per_thing { $name::from_parts(1), ); assert_eq!( - $name::from_rational_approximation(2, 2 * $max - 1), + $name::from_rational_approximation(2, 2 * max_value - 1), $name::from_parts(1), ); assert_eq!( - $name::from_rational_approximation(1, $max+1), + $name::from_rational_approximation(1, max_value + 1), $name::zero(), ); assert_eq!( $name::from_rational_approximation(3 * max_value / 2, 3 * max_value), - $name::from_percent(50), + $name::from_fraction(0.5), ); + $(per_thing_from_rationale_approx_test!($test_units);)* } #[test] fn per_things_mul_operates_in_output_type() { - // assert_eq!($name::from_percent(50) * 100u32, 50u32); - assert_eq!($name::from_percent(50) * 100u64, 50u64); - assert_eq!($name::from_percent(50) * 100u128, 50u128); + // assert_eq!($name::from_fraction(0.5) * 100u32, 50u32); + assert_eq!($name::from_fraction(0.5) * 100u64, 50u64); + assert_eq!($name::from_fraction(0.5) * 100u128, 50u128); } #[test] fn per_thing_saturating_op_works() { - assert_eq!( - $name::from_percent(50).saturating_add($name::from_percent(40)), - $name::from_percent(90) + assert_eq_error_rate!( + $name::from_fraction(0.5).saturating_add($name::from_fraction(0.4)).0 as $upper_type, + $name::from_fraction(0.9).0 as $upper_type, + 2, ); - assert_eq!( - $name::from_percent(50).saturating_add($name::from_percent(50)), - $name::from_percent(100) + assert_eq_error_rate!( + $name::from_fraction(0.5).saturating_add($name::from_fraction(0.5)).0 as $upper_type, + $name::one().0 as $upper_type, + 2, ); assert_eq!( - $name::from_percent(60).saturating_add($name::from_percent(50)), - $name::from_percent(100) + $name::from_fraction(0.6).saturating_add($name::from_fraction(0.5)), + $name::one(), ); - assert_eq!( - $name::from_percent(60).saturating_sub($name::from_percent(50)), - $name::from_percent(10) + assert_eq_error_rate!( + $name::from_fraction(0.6).saturating_sub($name::from_fraction(0.5)).0 as $upper_type, + $name::from_fraction(0.1).0 as $upper_type, + 2, ); assert_eq!( - $name::from_percent(60).saturating_sub($name::from_percent(60)), - $name::from_percent(0) + $name::from_fraction(0.6).saturating_sub($name::from_fraction(0.6)), + $name::from_fraction(0.0), ); assert_eq!( - $name::from_percent(60).saturating_sub($name::from_percent(70)), - $name::from_percent(0) + $name::from_fraction(0.6).saturating_sub($name::from_fraction(0.7)), + $name::from_fraction(0.0), ); - assert_eq!( - 
$name::from_percent(50).saturating_mul($name::from_percent(50)), - $name::from_percent(25) + assert_eq_error_rate!( + $name::from_fraction(0.5).saturating_mul($name::from_fraction(0.5)).0 as $upper_type, + $name::from_fraction(0.25).0 as $upper_type, + 2, ); - assert_eq!( - $name::from_percent(20).saturating_mul($name::from_percent(20)), - $name::from_percent(4) + assert_eq_error_rate!( + $name::from_fraction(0.2).saturating_mul($name::from_fraction(0.2)).0 as $upper_type, + $name::from_fraction(0.04).0 as $upper_type, + 2, ); - assert_eq!( - $name::from_percent(10).saturating_mul($name::from_percent(10)), - $name::from_percent(1) + assert_eq_error_rate!( + $name::from_fraction(0.1).saturating_mul($name::from_fraction(0.1)).0 as $upper_type, + $name::from_fraction(0.01).0 as $upper_type, + 1, ); } #[test] fn per_thing_square_works() { - assert_eq!($name::from_percent(100).square(), $name::from_percent(100)); - assert_eq!($name::from_percent(50).square(), $name::from_percent(25)); - assert_eq!($name::from_percent(10).square(), $name::from_percent(1)); + assert_eq!($name::from_fraction(1.0).square(), $name::from_fraction(1.0)); + assert_eq!($name::from_fraction(0.5).square(), $name::from_fraction(0.25)); + assert_eq!($name::from_fraction(0.1).square(), $name::from_fraction(0.01)); assert_eq!( - $name::from_percent(2).square(), + $name::from_fraction(0.02).square(), $name::from_parts((4 * <$upper_type>::from($max) / 100 / 100) as $type) ); } @@ -527,22 +930,203 @@ macro_rules! implement_per_thing { #[test] fn per_things_div_works() { // normal - assert_eq!($name::from_percent(10) / $name::from_percent(20), - $name::from_percent(50) + assert_eq_error_rate!( + ($name::from_fraction(0.1) / $name::from_fraction(0.20)).0 as $upper_type, + $name::from_fraction(0.50).0 as $upper_type, + 2, ); - assert_eq!($name::from_percent(10) / $name::from_percent(10), - $name::from_percent(100) + assert_eq_error_rate!( + ($name::from_fraction(0.1) / $name::from_fraction(0.10)).0 as $upper_type, + $name::from_fraction(1.0).0 as $upper_type, + 2, ); - assert_eq!($name::from_percent(10) / $name::from_percent(0), - $name::from_percent(100) + assert_eq_error_rate!( + ($name::from_fraction(0.1) / $name::from_fraction(0.0)).0 as $upper_type, + $name::from_fraction(1.0).0 as $upper_type, + 2, ); // will not overflow - assert_eq!($name::from_percent(10) / $name::from_percent(5), - $name::from_percent(100) + assert_eq_error_rate!( + ($name::from_fraction(0.10) / $name::from_fraction(0.05)).0 as $upper_type, + $name::from_fraction(1.0).0 as $upper_type, + 2, + ); + assert_eq_error_rate!( + ($name::from_fraction(1.0) / $name::from_fraction(0.5)).0 as $upper_type, + $name::from_fraction(1.0).0 as $upper_type, + 2, + ); + } + + #[test] + fn saturating_pow_works() { + // x^0 == 1 + assert_eq!( + $name::from_parts($max / 2).saturating_pow(0), + $name::from_parts($max), + ); + + // x^1 == x + assert_eq!( + $name::from_parts($max / 2).saturating_pow(1), + $name::from_parts($max / 2), + ); + + // x^2 + assert_eq!( + $name::from_parts($max / 2).saturating_pow(2), + $name::from_parts($max / 2).square(), + ); + + // x^3 + assert_eq!( + $name::from_parts($max / 2).saturating_pow(3), + $name::from_parts($max / 8), + ); + + // 0^n == 0 + assert_eq!( + $name::from_parts(0).saturating_pow(3), + $name::from_parts(0), ); - assert_eq!($name::from_percent(100) / $name::from_percent(50), - $name::from_percent(100) + + // 1^n == 1 + assert_eq!( + $name::from_parts($max).saturating_pow(3), + $name::from_parts($max), + ); + + // (x < 1)^inf == 0 
(where 2.pow(31) ~ inf) + assert_eq!( + $name::from_parts($max / 2).saturating_pow(2usize.pow(31)), + $name::from_parts(0), + ); + } + + #[test] + fn saturating_reciprocal_mul_works() { + // divide by 1 + assert_eq!( + $name::from_parts($max).saturating_reciprocal_mul(<$type>::from(10u8)), + 10, + ); + // divide by 1/2 + assert_eq!( + $name::from_parts($max / 2).saturating_reciprocal_mul(<$type>::from(10u8)), + 20, + ); + // saturate + assert_eq!( + $name::from_parts(1).saturating_reciprocal_mul($max), + <$type>::max_value(), + ); + // round to nearest + assert_eq!( + $name::from_percent(60).saturating_reciprocal_mul(<$type>::from(10u8)), + 17, + ); + // round down + assert_eq!( + $name::from_percent(60).saturating_reciprocal_mul_floor(<$type>::from(10u8)), + 16, + ); + // round to nearest + assert_eq!( + $name::from_percent(61).saturating_reciprocal_mul(<$type>::from(10u8)), + 16, + ); + // round up + assert_eq!( + $name::from_percent(61).saturating_reciprocal_mul_ceil(<$type>::from(10u8)), + 17, + ); + } + + #[test] + fn saturating_truncating_mul_works() { + assert_eq!( + $name::from_percent(49).mul_floor(10 as $type), + 4, + ); + let a: $upper_type = $name::from_percent(50).mul_floor(($max as $upper_type).pow(2)); + let b: $upper_type = ($max as $upper_type).pow(2) / 2; + if $max % 2 == 0 { + assert_eq!(a, b); + } else { + // difference should be less that 1%, IE less than the error in `from_percent` + assert!(b - a < ($max as $upper_type).pow(2) / 100 as $upper_type); + } + } + + #[test] + fn rational_mul_correction_works() { + assert_eq!( + super::rational_mul_correction::<$type, $name>( + <$type>::max_value(), + <$type>::max_value(), + <$type>::max_value(), + super::Rounding::Nearest, + ), + 0, + ); + assert_eq!( + super::rational_mul_correction::<$type, $name>( + <$type>::max_value() - 1, + <$type>::max_value(), + <$type>::max_value(), + super::Rounding::Nearest, + ), + <$type>::max_value() - 1, + ); + assert_eq!( + super::rational_mul_correction::<$upper_type, $name>( + ((<$type>::max_value() - 1) as $upper_type).pow(2), + <$type>::max_value(), + <$type>::max_value(), + super::Rounding::Nearest, + ), + 1, + ); + // ((max^2 - 1) % max) * max / max == max - 1 + assert_eq!( + super::rational_mul_correction::<$upper_type, $name>( + (<$type>::max_value() as $upper_type).pow(2) - 1, + <$type>::max_value(), + <$type>::max_value(), + super::Rounding::Nearest, + ), + (<$type>::max_value() - 1).into(), + ); + // (max % 2) * max / 2 == max / 2 + assert_eq!( + super::rational_mul_correction::<$upper_type, $name>( + (<$type>::max_value() as $upper_type).pow(2), + <$type>::max_value(), + 2 as $type, + super::Rounding::Nearest, + ), + <$type>::max_value() as $upper_type / 2, + ); + // ((max^2 - 1) % max) * 2 / max == 2 (rounded up) + assert_eq!( + super::rational_mul_correction::<$upper_type, $name>( + (<$type>::max_value() as $upper_type).pow(2) - 1, + 2 as $type, + <$type>::max_value(), + super::Rounding::Nearest, + ), + 2, + ); + // ((max^2 - 1) % max) * 2 / max == 1 (rounded down) + assert_eq!( + super::rational_mul_correction::<$upper_type, $name>( + (<$type>::max_value() as $upper_type).pow(2) - 1, + 2 as $type, + <$type>::max_value(), + super::Rounding::Down, + ), + 1, ); } } @@ -558,6 +1142,15 @@ implement_per_thing!( u16, "_Percent_", ); +implement_per_thing!( + PerU16, + test_peru16, + [u32, u64, u128], + 65535_u16, + u16, + u32, + "_Parts per 65535_", +); implement_per_thing!( Permill, test_permill, diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs 
index 75adf0e1363dd6c460707d06a4852f2f39c4fbe2..23f8f23f0bd77b783dfc1908b0efc6354f9fc4a8 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -21,7 +21,7 @@ use codec::HasCompact; pub use integer_sqrt::IntegerSquareRoot; pub use num_traits::{ Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, - CheckedShl, CheckedShr + CheckedShl, CheckedShr, checked_pow }; use sp_std::ops::{ Add, Sub, Mul, Div, Rem, AddAssign, SubAssign, MulAssign, DivAssign, @@ -104,29 +104,41 @@ impl + Sized> UniqueSaturatedInto for S { } } -/// Simple trait to use checked mul and max value to give a saturated mul operation over -/// supported types. +/// Saturating arithmetic operations, returning maximum or minimum values instead of overflowing. pub trait Saturating { - /// Saturated addition - if the product can't fit in the type then just use max-value. - fn saturating_add(self, o: Self) -> Self; - - /// Saturated subtraction - if the product can't fit in the type then just use max-value. - fn saturating_sub(self, o: Self) -> Self; - - /// Saturated multiply - if the product can't fit in the type then just use max-value. - fn saturating_mul(self, o: Self) -> Self; + /// Saturating addition. Compute `self + rhs`, saturating at the numeric bounds instead of + /// overflowing. + fn saturating_add(self, rhs: Self) -> Self; + + /// Saturating subtraction. Compute `self - rhs`, saturating at the numeric bounds instead of + /// overflowing. + fn saturating_sub(self, rhs: Self) -> Self; + + /// Saturating multiply. Compute `self * rhs`, saturating at the numeric bounds instead of + /// overflowing. + fn saturating_mul(self, rhs: Self) -> Self; + + /// Saturating exponentiation. Compute `self.pow(exp)`, saturating at the numeric bounds + /// instead of overflowing. 
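`Saturating` gains `saturating_pow`, and the blanket impl below routes plain integers through `num_traits::checked_pow`, clamping to the maximum on overflow. A hedged usage sketch; the per-thing identity mirrors the `saturating_pow_works` test added earlier in this diff.

```rust
use sp_arithmetic::{traits::Saturating, Perbill};

fn main() {
    // Integers: 2^40 overflows u32, so the result clamps to u32::max_value().
    assert_eq!(Saturating::saturating_pow(2u32, 40), u32::max_value());
    assert_eq!(Saturating::saturating_pow(10u32, 2), 100);

    // Per-things stay in [0, 1]: (1/2)^3 == 1/8.
    assert_eq!(
        Perbill::from_percent(50).saturating_pow(3),
        Perbill::from_rational_approximation(1u32, 8u32),
    );
}
```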
+ fn saturating_pow(self, exp: usize) -> Self; } -impl Saturating for T { +impl Saturating for T { fn saturating_add(self, o: Self) -> Self { ::saturating_add(self, o) } + fn saturating_sub(self, o: Self) -> Self { ::saturating_sub(self, o) } + fn saturating_mul(self, o: Self) -> Self { self.checked_mul(&o).unwrap_or_else(Bounded::max_value) } + + fn saturating_pow(self, exp: usize) -> Self { + checked_pow(self, exp).unwrap_or_else(Bounded::max_value) + } } /// Convenience type to work around the highly unergonomic syntax needed diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index b4197c56d7e5ff912070b08af284a9693e74a73e..f37b67fab1ce8d476fef66656b3c5013701edbf9 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authority-discovery" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] description = "Authority discovery primitives" edition = "2018" @@ -9,11 +9,11 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", default-features = false, version = "1.2.0" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", default-features = false, version = "1.3.0" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } [features] default = ["std"] @@ -24,3 +24,6 @@ std = [ "sp-api/std", "sp-runtime/std" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/authority-discovery/src/lib.rs b/primitives/authority-discovery/src/lib.rs index 8926825525976f9d284bc1b6969ddfde936aff99..68680ad75946545f93d8d0c5ef5c3004a3623efc 100644 --- a/primitives/authority-discovery/src/lib.rs +++ b/primitives/authority-discovery/src/lib.rs @@ -21,8 +21,25 @@ use sp_std::vec::Vec; mod app { - use sp_application_crypto::{app_crypto, key_types::AUTHORITY_DISCOVERY, sr25519}; + use sp_application_crypto::{ + CryptoTypePublicPair, + key_types::AUTHORITY_DISCOVERY, + Public as _, + app_crypto, + sr25519}; app_crypto!(sr25519, AUTHORITY_DISCOVERY); + + impl From for CryptoTypePublicPair { + fn from(key: Public) -> Self { + (&key).into() + } + } + + impl From<&Public> for CryptoTypePublicPair { + fn from(key: &Public) -> Self { + CryptoTypePublicPair(sr25519::CRYPTO_ID, key.to_raw_vec()) + } + } } sp_application_crypto::with_pair! 
{ diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index c5215631cca28c413460b2e77107cd1c575e7122..a4d5aa03c2382c3c93f278bca825c7ca08688951 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-authorship" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] description = "Authorship primitives" edition = "2018" @@ -9,10 +9,10 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../inherents" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../inherents" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] @@ -22,3 +22,6 @@ std = [ "sp-inherents/std", "sp-runtime/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index b5cb592d55b4109264d57b99c349d901626b8706..df33b2c955ff096b1bd861769d1dd5c56497a867 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-block-builder" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,11 +9,11 @@ repository = "https://github.com/paritytech/substrate/" description = "The block builder runtime api." 
[dependencies] -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../inherents" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../api" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../inherents" } [features] default = [ "std" ] @@ -24,3 +24,6 @@ std = [ "sp-api/std", "sp-std/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 5fb8ce3574719cd3af0f0231b682c38110e763cc..49e6baead1fb82a919b3d7ca40fe0a6045ee0524 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-blockchain" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -15,8 +15,11 @@ log = "0.4.8" lru = "0.4.0" parking_lot = "0.10.0" derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8.0-alpha.2", path = "../consensus/common" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } -sp-block-builder = { version = "2.0.0-alpha.2", path = "../block-builder" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../state-machine" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-consensus = { version = "0.8.0-alpha.5", path = "../consensus/common" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../runtime" } +sp-block-builder = { version = "2.0.0-alpha.5", path = "../block-builder" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../state-machine" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 35cac1e481120e2a88f9dafeebfc26f6e7287326..e92dfd8c98e89397ee2e3d10e6f85e71b0c5ac00 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -254,6 +254,8 @@ pub struct Info { pub finalized_hash: Block::Hash, /// Last finalized block number. pub finalized_number: <::Header as HeaderT>::Number, + /// Number of concurrent leave forks. + pub number_leaves: usize } /// Block status. diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index a4ec9c29952ab986f2615bec87dee37c7de81fae..e479b8abe918ee881dbb4fa79cdf824cfe6fb14e 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -127,6 +127,8 @@ pub enum Error { /// Incomplete block import pipeline. 
#[display(fmt = "Incomplete block import pipeline.")] IncompletePipeline, + #[display(fmt = "Transaction pool not ready for block production.")] + TransactionPoolNotReady, /// A convenience variant for String #[display(fmt = "{}", _0)] Msg(String), diff --git a/primitives/chain-spec/Cargo.toml b/primitives/chain-spec/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..62d64bf614d9381187a2a689631cc100d693ed16 --- /dev/null +++ b/primitives/chain-spec/Cargo.toml @@ -0,0 +1,13 @@ +[package] +name = "sp-chain-spec" +version = "2.0.0-alpha.5" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate chain configurations types." + +[dependencies] +serde = { version = "1.0.101", features = ["derive"] } +serde_json = "1.0.41" diff --git a/primitives/chain-spec/src/lib.rs b/primitives/chain-spec/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..13ebc09b6c0f0646b92d21056bbc1ef217f0ca3d --- /dev/null +++ b/primitives/chain-spec/src/lib.rs @@ -0,0 +1,42 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Types and traits related to chain specifications. + +/// The type of a chain. +/// +/// This can be used by tools to determine the type of a chain for displaying +/// additional information or enabling additional features. +#[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Clone)] +pub enum ChainType { + /// A development chain that runs mainly on one node. + Development, + /// A local chain that runs locally on multiple nodes for testing purposes. + Local, + /// A live chain. + Live, + /// Some custom chain type. 
+ Custom(String), +} + +impl Default for ChainType { + fn default() -> Self { + Self::Live + } +} + +/// Arbitrary properties defined in chain spec as a JSON object +pub type Properties = serde_json::map::Map; diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 04d411939975f096b6b3d3536d7a6d992ddbe55b..99ce51a229c72e045e97b92f9e8c8f8d3aff2513 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-aura" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" @@ -9,13 +9,13 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../runtime" } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../inherents" } -sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../timestamp" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../../application-crypto" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../std" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../api" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../runtime" } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../inherents" } +sp-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../timestamp" } [features] default = ["std"] @@ -28,3 +28,6 @@ std = [ "sp-inherents/std", "sp-timestamp/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index f910b73ed6e0045ce475b884eafc86a6c0389f86..195a54a5910c419957f053a6f928aa369b467110 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-babe" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Primitives for BABE consensus" edition = "2018" @@ -9,15 +9,15 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../std" } -schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"], optional = true } -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } -sp-consensus = { version = "0.8.0-alpha.2", optional = true, path = "../common" } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = 
"../../inherents" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../runtime" } -sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../timestamp" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../../application-crypto" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../std" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../api" } +sp-consensus = { version = "0.8.0-alpha.5", optional = true, path = "../common" } +sp-consensus-vrf = { version = "0.8.0-alpha.5", path = "../vrf", default-features = false } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../inherents" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../runtime" } +sp-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../timestamp" } [features] default = ["std"] @@ -25,10 +25,13 @@ std = [ "sp-application-crypto/std", "codec/std", "sp-std/std", - "schnorrkel", "sp-api/std", "sp-consensus", + "sp-consensus-vrf/std", "sp-inherents/std", "sp-runtime/std", "sp-timestamp/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 7ec0f9b977cc7d921d64e0605db52d5c2c5bc3f3..6079aa88c87492538fbf3caefdd3775d4cdd9292 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -18,65 +18,96 @@ #[cfg(feature = "std")] use super::{BABE_ENGINE_ID, AuthoritySignature}; -#[cfg(not(feature = "std"))] -use super::{VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}; use super::{AuthorityId, AuthorityIndex, SlotNumber, BabeAuthorityWeight}; #[cfg(feature = "std")] use sp_runtime::{DigestItem, generic::OpaqueDigestItemId}; #[cfg(feature = "std")] -use std::fmt::Debug; +use std::{fmt::Debug, convert::{TryFrom, TryInto}}; use codec::{Decode, Encode}; #[cfg(feature = "std")] -use codec::{Codec, Input, Error}; -#[cfg(feature = "std")] -use schnorrkel::{ - SignatureError, errors::MultiSignatureStage, - vrf::{VRFProof, VRFOutput, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH} -}; +use codec::Codec; use sp_std::vec::Vec; +use sp_runtime::RuntimeDebug; +use sp_consensus_vrf::schnorrkel::{self, Randomness}; +#[cfg(feature = "std")] +use sp_consensus_vrf::schnorrkel::SignatureError; + +/// Raw BABE primary slot assignment pre-digest. +#[derive(Clone, RuntimeDebug, Encode, Decode)] +pub struct RawPrimaryPreDigest { + /// Authority index + pub authority_index: super::AuthorityIndex, + /// Slot number + pub slot_number: SlotNumber, + /// VRF output + pub vrf_output: VRFOutput, + /// VRF proof + pub vrf_proof: VRFProof, +} + +#[cfg(feature = "std")] +/// BABE primary slot assignment pre-digest for std environment. +pub type PrimaryPreDigest = RawPrimaryPreDigest; +#[cfg(feature = "std")] +impl TryFrom for PrimaryPreDigest { + type Error = SignatureError; + + fn try_from(raw: RawPrimaryPreDigest) -> Result { + Ok(PrimaryPreDigest { + authority_index: raw.authority_index, + slot_number: raw.slot_number, + vrf_output: raw.vrf_output.try_into()?, + vrf_proof: raw.vrf_proof.try_into()?, + }) + } +} + +/// BABE secondary slot assignment pre-digest. 
+#[derive(Clone, RuntimeDebug, Encode, Decode)] +pub struct SecondaryPreDigest { + /// Authority index + /// + /// This is not strictly-speaking necessary, since the secondary slots + /// are assigned based on slot number and epoch randomness. But including + /// it makes things easier for higher-level users of the chain data to + /// be aware of the author of a secondary-slot block. + pub authority_index: super::AuthorityIndex, + /// Slot number + pub slot_number: SlotNumber, +} /// A BABE pre-runtime digest. This contains all data required to validate a /// block and for the BABE runtime module. Slots can be assigned to a primary /// (VRF based) and to a secondary (slot number based). -#[cfg(feature = "std")] -#[derive(Clone, Debug)] -pub enum PreDigest { +#[derive(Clone, RuntimeDebug, Encode, Decode)] +pub enum RawPreDigest { /// A primary VRF-based slot assignment. - Primary { - /// VRF output - vrf_output: VRFOutput, - /// VRF proof - vrf_proof: VRFProof, - /// Authority index - authority_index: super::AuthorityIndex, - /// Slot number - slot_number: SlotNumber, - }, + #[codec(index = "1")] + Primary(RawPrimaryPreDigest), /// A secondary deterministic slot assignment. - Secondary { - /// Authority index - authority_index: super::AuthorityIndex, - /// Slot number - slot_number: SlotNumber, - }, + #[codec(index = "2")] + Secondary(SecondaryPreDigest), } #[cfg(feature = "std")] -impl PreDigest { +/// A BABE pre-runtime digest for std. +pub type PreDigest = RawPreDigest; + +impl RawPreDigest { /// Returns the slot number of the pre digest. pub fn authority_index(&self) -> AuthorityIndex { match self { - PreDigest::Primary { authority_index, .. } => *authority_index, - PreDigest::Secondary { authority_index, .. } => *authority_index, + RawPreDigest::Primary(primary) => primary.authority_index, + RawPreDigest::Secondary(secondary) => secondary.authority_index, } } /// Returns the slot number of the pre digest. pub fn slot_number(&self) -> SlotNumber { match self { - PreDigest::Primary { slot_number, .. } => *slot_number, - PreDigest::Secondary { slot_number, .. } => *slot_number, + RawPreDigest::Primary(primary) => primary.slot_number, + RawPreDigest::Secondary(secondary) => secondary.slot_number, } } @@ -84,121 +115,33 @@ impl PreDigest { /// of the chain. pub fn added_weight(&self) -> crate::BabeBlockWeight { match self { - PreDigest::Primary { .. } => 1, - PreDigest::Secondary { .. } => 0, - } - } -} - -/// A raw version of `BabePreDigest`, usable on `no_std`. -#[derive(Copy, Clone, Encode, Decode)] -pub enum RawPreDigest { - /// A primary VRF-based slot assignment. - #[codec(index = "1")] - Primary { - /// Authority index - authority_index: AuthorityIndex, - /// Slot number - slot_number: SlotNumber, - /// VRF output - vrf_output: [u8; VRF_OUTPUT_LENGTH], - /// VRF proof - vrf_proof: [u8; VRF_PROOF_LENGTH], - }, - /// A secondary deterministic slot assignment. - #[codec(index = "2")] - Secondary { - /// Authority index - /// - /// This is not strictly-speaking necessary, since the secondary slots - /// are assigned based on slot number and epoch randomness. But including - /// it makes things easier for higher-level users of the chain data to - /// be aware of the author of a secondary-slot block. - authority_index: AuthorityIndex, - /// Slot number - slot_number: SlotNumber, - }, -} - -impl RawPreDigest { - /// Returns the slot number of the pre digest. - pub fn slot_number(&self) -> SlotNumber { - match self { - RawPreDigest::Primary { slot_number, .. 
} => *slot_number, - RawPreDigest::Secondary { slot_number, .. } => *slot_number, + RawPreDigest::Primary(_) => 1, + RawPreDigest::Secondary(_) => 0, } } } #[cfg(feature = "std")] -impl Encode for PreDigest { - fn encode(&self) -> Vec { - let raw = match self { - PreDigest::Primary { - vrf_output, - vrf_proof, - authority_index, - slot_number, - } => { - RawPreDigest::Primary { - vrf_output: *vrf_output.as_bytes(), - vrf_proof: vrf_proof.to_bytes(), - authority_index: *authority_index, - slot_number: *slot_number, - } - }, - PreDigest::Secondary { - authority_index, - slot_number, - } => { - RawPreDigest::Secondary { - authority_index: *authority_index, - slot_number: *slot_number, - } - }, - }; - - codec::Encode::encode(&raw) - } -} - -#[cfg(feature = "std")] -impl codec::EncodeLike for PreDigest {} - -#[cfg(feature = "std")] -impl Decode for PreDigest { - fn decode(i: &mut R) -> Result { - let pre_digest = match Decode::decode(i)? { - RawPreDigest::Primary { vrf_output, vrf_proof, authority_index, slot_number } => { - // Verify (at compile time) that the sizes in babe_primitives are correct - let _: [u8; super::VRF_OUTPUT_LENGTH] = vrf_output; - let _: [u8; super::VRF_PROOF_LENGTH] = vrf_proof; - - PreDigest::Primary { - vrf_proof: VRFProof::from_bytes(&vrf_proof).map_err(convert_error)?, - vrf_output: VRFOutput::from_bytes(&vrf_output).map_err(convert_error)?, - authority_index, - slot_number, - } - }, - RawPreDigest::Secondary { authority_index, slot_number } => { - PreDigest::Secondary { authority_index, slot_number } - }, - }; - - Ok(pre_digest) +impl TryFrom for PreDigest { + type Error = SignatureError; + + fn try_from(raw: RawPreDigest) -> Result { + Ok(match raw { + RawPreDigest::Primary(primary) => PreDigest::Primary(primary.try_into()?), + RawPreDigest::Secondary(secondary) => PreDigest::Secondary(secondary), + }) } } /// Information about the next epoch. This is broadcast in the first block /// of the epoch. -#[derive(Decode, Encode, Default, PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] +#[derive(Decode, Encode, Default, PartialEq, Eq, Clone, RuntimeDebug)] pub struct NextEpochDescriptor { /// The authorities. pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// The value of randomness to use for the slot-assignment. - pub randomness: [u8; VRF_OUTPUT_LENGTH], + pub randomness: Randomness, } /// A digest item which is usable with BABE consensus. @@ -248,34 +191,3 @@ impl CompatibleDigestItem for DigestItem where }) } } - -#[cfg(feature = "std")] -fn convert_error(e: SignatureError) -> codec::Error { - use SignatureError::*; - use MultiSignatureStage::*; - match e { - EquationFalse => "Signature error: `EquationFalse`".into(), - PointDecompressionError => "Signature error: `PointDecompressionError`".into(), - ScalarFormatError => "Signature error: `ScalarFormatError`".into(), - NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), - BytesLengthError { .. 
} => "Signature error: `BytesLengthError`".into(), - MuSigAbsent { musig_stage: Commitment } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigAbsent { musig_stage: Reveal } => - "Signature error: `MuSigAbsent` at stage `Reveal`".into(), - MuSigAbsent { musig_stage: Cosignature } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into(), - } -} diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 392dcb560bb008202004015ea971dfeddca8b1b4..33701860d1f10234a823c4d625acfc02094f89f5 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -22,6 +22,10 @@ pub mod digests; pub mod inherents; +pub use sp_consensus_vrf::schnorrkel::{ + Randomness, VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH +}; + use codec::{Encode, Decode}; use sp_std::vec::Vec; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; @@ -50,12 +54,6 @@ pub type AuthorityId = app::Public; /// The `ConsensusEngineId` of BABE. pub const BABE_ENGINE_ID: ConsensusEngineId = *b"BABE"; -/// The length of the VRF output -pub const VRF_OUTPUT_LENGTH: usize = 32; - -/// The length of the VRF proof -pub const VRF_PROOF_LENGTH: usize = 64; - /// The length of the public key pub const PUBLIC_KEY_LENGTH: usize = 32; @@ -115,7 +113,7 @@ pub struct BabeConfiguration { pub genesis_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, /// The randomness for the genesis epoch. - pub randomness: [u8; VRF_OUTPUT_LENGTH], + pub randomness: Randomness, /// Whether this chain should run with secondary slots, which are assigned /// in round-robin manner. 
diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index e8d3c0c6118d0c4296a0f93aebdcc6640537ca21..4734cde694261bd3d09c4eb688619f8d208c07eb 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,18 +12,19 @@ documentation = "https://docs.rs/sp-consensus/" [dependencies] derive_more = "0.99.2" -libp2p = { version = "0.16.2", default-features = false } +libp2p = { version = "0.17.0", default-features = false } log = "0.4.8" -sp-core = { path= "../../core" , version = "2.0.0-alpha.2"} -sp-inherents = { version = "2.0.0-alpha.2", path = "../../inherents" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } +sp-core = { path= "../../core" , version = "2.0.0-alpha.5"} +sp-inherents = { version = "2.0.0-alpha.5", path = "../../inherents" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../../primitives/state-machine" } futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "3.0.1" futures-diagnose = "1.0" -sp-std = { version = "2.0.0-alpha.2", path = "../../std" } -sp-version = { version = "2.0.0-alpha.2", path = "../../version" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime" } -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", path = "../../std" } +sp-version = { version = "2.0.0-alpha.5", path = "../../version" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../runtime" } +sp-utils = { version = "2.0.0-alpha.5", path = "../../utils" } +codec = { package = "parity-scale-codec", version = "1.3.0", features = ["derive"] } parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } @@ -32,3 +33,6 @@ sp-test-primitives = { version = "2.0.0-dev", path = "../../test-primitives" } [features] default = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index 0d1aed7fb1c72e4225fab3c48cba9faade48c20c..024e473849349f416ce0d84a87222f2fb57bd88b 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -15,10 +15,11 @@ // along with Substrate. If not, see . use std::{mem, pin::Pin, time::Duration, marker::PhantomData, sync::Arc}; -use futures::{prelude::*, channel::mpsc, task::Context, task::Poll}; +use futures::{prelude::*, task::Context, task::Poll}; use futures_timer::Delay; use parking_lot::{Mutex, Condvar}; use sp_runtime::{Justification, traits::{Block as BlockT, Header as HeaderT, NumberFor}}; +use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded}; use crate::block_import::BlockOrigin; use crate::import_queue::{ @@ -32,7 +33,7 @@ use crate::import_queue::{ /// task, with plugable verification. pub struct BasicQueue { /// Channel to send messages to the background task. - sender: mpsc::UnboundedSender>, + sender: TracingUnboundedSender>, /// Results coming from the worker task. 
result_port: BufferedLinkReceiver, /// If it isn't possible to spawn the future in `future_to_spawn` (which is notably the case in @@ -195,8 +196,8 @@ impl BlockImportWorker { block_import: BoxBlockImport, justification_import: Option>, finality_proof_import: Option>, - ) -> (impl Future + Send, mpsc::UnboundedSender>) { - let (sender, mut port) = mpsc::unbounded(); + ) -> (impl Future + Send, TracingUnboundedSender>) { + let (sender, mut port) = tracing_unbounded("mpsc_block_import_worker"); let mut worker = BlockImportWorker { result_sender, diff --git a/primitives/consensus/common/src/import_queue/buffered_link.rs b/primitives/consensus/common/src/import_queue/buffered_link.rs index d0f6c87951354f602f5400fa28b95a87673cbef7..ea77fc97f0e8a28e889ea8c13c1644a2f968a734 100644 --- a/primitives/consensus/common/src/import_queue/buffered_link.rs +++ b/primitives/consensus/common/src/import_queue/buffered_link.rs @@ -37,8 +37,9 @@ //! ``` //! -use futures::{prelude::*, channel::mpsc}; +use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; use std::{pin::Pin, task::Context, task::Poll}; use crate::import_queue::{Origin, Link, BlockImportResult, BlockImportError}; @@ -46,7 +47,7 @@ use crate::import_queue::{Origin, Link, BlockImportResult, BlockImportError}; /// can be used to buffer commands, and the receiver can be used to poll said commands and transfer /// them to another link. pub fn buffered_link() -> (BufferedLinkSender, BufferedLinkReceiver) { - let (tx, rx) = mpsc::unbounded(); + let (tx, rx) = tracing_unbounded("mpsc_buffered_link"); let tx = BufferedLinkSender { tx }; let rx = BufferedLinkReceiver { rx }; (tx, rx) @@ -54,7 +55,7 @@ pub fn buffered_link() -> (BufferedLinkSender, BufferedLinkReceive /// See [`buffered_link`]. pub struct BufferedLinkSender { - tx: mpsc::UnboundedSender>, + tx: TracingUnboundedSender>, } impl BufferedLinkSender { @@ -125,7 +126,7 @@ impl Link for BufferedLinkSender { /// See [`buffered_link`]. 
pub struct BufferedLinkReceiver { - rx: mpsc::UnboundedReceiver>, + rx: TracingUnboundedReceiver>, } impl BufferedLinkReceiver { diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index 4abead3258f81afdec7243f4c374d26281ac7544..5ca60bb2155590fa34e1368ee1758ce7639871aa 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-consensus-pow" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" @@ -9,11 +9,11 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../runtime" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../core" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../api" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../runtime" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../core" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } [features] default = ["std"] @@ -24,3 +24,6 @@ std = [ "sp-core/std", "codec/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/consensus/vrf/Cargo.toml b/primitives/consensus/vrf/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..cf194ec38b50ac75b638044936ceca73172309c8 --- /dev/null +++ b/primitives/consensus/vrf/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "sp-consensus-vrf" +version = "0.8.0-alpha.5" +authors = ["Parity Technologies "] +description = "Primitives for VRF based consensus" +edition = "2018" +license = "GPL-3.0" +repository = "https://github.com/paritytech/substrate/" +homepage = "https://substrate.dev" + +[dependencies] +codec = { version = "1.0.0", package = "parity-scale-codec", default-features = false } +schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated"], optional = true } +sp-std = { version = "2.0.0-alpha.5", path = "../../std", default-features = false } +sp-core = { version = "2.0.0-alpha.5", path = "../../core", default-features = false } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../runtime" } + +[features] +default = ["std"] +std = [ + "codec/std", + "schnorrkel", + "sp-std/std", + "sp-core/std", + "sp-runtime/std", +] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/consensus/vrf/src/lib.rs b/primitives/consensus/vrf/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..4ec6e376d6829f15937e38d569c9edf1779b0927 --- /dev/null +++ b/primitives/consensus/vrf/src/lib.rs @@ -0,0 +1,20 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Primitives for VRF-based consensus engines. +#![cfg_attr(not(feature = "std"), no_std)] + +pub mod schnorrkel; diff --git a/primitives/consensus/vrf/src/schnorrkel.rs b/primitives/consensus/vrf/src/schnorrkel.rs new file mode 100644 index 0000000000000000000000000000000000000000..265572dbdaee78dc4848f075bf22a690ea743a4e --- /dev/null +++ b/primitives/consensus/vrf/src/schnorrkel.rs @@ -0,0 +1,250 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Schnorrkel-based VRF. + +use codec::{Encode, Decode}; +use sp_runtime::RuntimeDebug; +use sp_std::ops::{Deref, DerefMut}; +#[cfg(feature = "std")] +use std::convert::TryFrom; +#[cfg(feature = "std")] +use codec::EncodeLike; +#[cfg(feature = "std")] +use schnorrkel::errors::MultiSignatureStage; +#[cfg(feature = "std")] +use sp_core::U512; + +#[cfg(feature = "std")] +pub use schnorrkel::{SignatureError, vrf::{VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH}}; + +/// The length of the VRF proof. +#[cfg(not(feature = "std"))] +pub const VRF_PROOF_LENGTH: usize = 64; + +/// The length of the VRF output. +#[cfg(not(feature = "std"))] +pub const VRF_OUTPUT_LENGTH: usize = 32; + +/// The length of the Randomness. +pub const RANDOMNESS_LENGTH: usize = VRF_OUTPUT_LENGTH; + +/// Raw VRF output. +#[derive(Clone, Copy, Eq, PartialEq, RuntimeDebug, Encode, Decode)] +pub struct RawVRFOutput(pub [u8; VRF_OUTPUT_LENGTH]); + +impl Deref for RawVRFOutput { + type Target = [u8; VRF_OUTPUT_LENGTH]; + fn deref(&self) -> &Self::Target { &self.0 } +} + +impl DerefMut for RawVRFOutput { + fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } +} + +/// VRF output type available for `std` environment, suitable for schnorrkel operations. 
+#[cfg(feature = "std")] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct VRFOutput(pub schnorrkel::vrf::VRFOutput); + +#[cfg(feature = "std")] +impl Deref for VRFOutput { + type Target = schnorrkel::vrf::VRFOutput; + fn deref(&self) -> &Self::Target { &self.0 } +} + +#[cfg(feature = "std")] +impl DerefMut for VRFOutput { + fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } +} + +#[cfg(feature = "std")] +impl Encode for VRFOutput { + fn encode(&self) -> Vec { + self.0.as_bytes().encode() + } +} + +#[cfg(feature = "std")] +impl EncodeLike for VRFOutput { } + +#[cfg(feature = "std")] +impl Decode for VRFOutput { + fn decode(i: &mut R) -> Result { + let decoded = <[u8; VRF_OUTPUT_LENGTH]>::decode(i)?; + Ok(Self(schnorrkel::vrf::VRFOutput::from_bytes(&decoded).map_err(convert_error)?)) + } +} + +#[cfg(feature = "std")] +impl TryFrom<[u8; VRF_OUTPUT_LENGTH]> for VRFOutput { + type Error = SignatureError; + + fn try_from(raw: [u8; VRF_OUTPUT_LENGTH]) -> Result { + schnorrkel::vrf::VRFOutput::from_bytes(&raw).map(VRFOutput) + } +} + +#[cfg(feature = "std")] +impl TryFrom for VRFOutput { + type Error = SignatureError; + + fn try_from(raw: RawVRFOutput) -> Result { + schnorrkel::vrf::VRFOutput::from_bytes(&raw.0).map(VRFOutput) + } +} + +#[cfg(feature = "std")] +impl From for RawVRFOutput { + fn from(output: VRFOutput) -> RawVRFOutput { + RawVRFOutput(output.to_bytes()) + } +} + +/// Raw VRF proof. +#[derive(Clone, Copy, Encode, Decode)] +pub struct RawVRFProof(pub [u8; VRF_PROOF_LENGTH]); + +impl Deref for RawVRFProof { + type Target = [u8; VRF_PROOF_LENGTH]; + fn deref(&self) -> &Self::Target { &self.0 } +} + +impl DerefMut for RawVRFProof { + fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } +} + +#[cfg(feature = "std")] +impl std::fmt::Debug for RawVRFProof { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", &self) + } +} + +impl core::cmp::PartialEq for RawVRFProof { + fn eq(&self, other: &Self) -> bool { + self == other + } +} + +impl core::cmp::Eq for RawVRFProof { } + +/// VRF proof type available for `std` environment, suitable for schnorrkel operations. 
+#[cfg(feature = "std")] +#[derive(Clone, Debug, PartialEq, Eq)] +pub struct VRFProof(pub schnorrkel::vrf::VRFProof); + +#[cfg(feature = "std")] +impl PartialOrd for VRFProof { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +#[cfg(feature = "std")] +impl Ord for VRFProof { + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + U512::from(self.0.to_bytes()).cmp(&U512::from(other.0.to_bytes())) + } +} + +#[cfg(feature = "std")] +impl Deref for VRFProof { + type Target = schnorrkel::vrf::VRFProof; + fn deref(&self) -> &Self::Target { &self.0 } +} + +#[cfg(feature = "std")] +impl DerefMut for VRFProof { + fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } +} + +#[cfg(feature = "std")] +impl Encode for VRFProof { + fn encode(&self) -> Vec { + self.0.to_bytes().encode() + } +} + +#[cfg(feature = "std")] +impl EncodeLike for VRFProof { } + +#[cfg(feature = "std")] +impl Decode for VRFProof { + fn decode(i: &mut R) -> Result { + let decoded = <[u8; VRF_PROOF_LENGTH]>::decode(i)?; + Ok(Self(schnorrkel::vrf::VRFProof::from_bytes(&decoded).map_err(convert_error)?)) + } +} + +#[cfg(feature = "std")] +impl TryFrom<[u8; VRF_PROOF_LENGTH]> for VRFProof { + type Error = SignatureError; + + fn try_from(raw: [u8; VRF_PROOF_LENGTH]) -> Result { + schnorrkel::vrf::VRFProof::from_bytes(&raw).map(VRFProof) + } +} + +#[cfg(feature = "std")] +impl TryFrom for VRFProof { + type Error = SignatureError; + + fn try_from(raw: RawVRFProof) -> Result { + schnorrkel::vrf::VRFProof::from_bytes(&raw.0).map(VRFProof) + } +} + +#[cfg(feature = "std")] +impl From for RawVRFProof { + fn from(output: VRFProof) -> RawVRFProof { + RawVRFProof(output.to_bytes()) + } +} + +#[cfg(feature = "std")] +fn convert_error(e: SignatureError) -> codec::Error { + use SignatureError::*; + use MultiSignatureStage::*; + match e { + EquationFalse => "Signature error: `EquationFalse`".into(), + PointDecompressionError => "Signature error: `PointDecompressionError`".into(), + ScalarFormatError => "Signature error: `ScalarFormatError`".into(), + NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), + BytesLengthError { .. } => "Signature error: `BytesLengthError`".into(), + MuSigAbsent { musig_stage: Commitment } => + "Signature error: `MuSigAbsent` at stage `Commitment`".into(), + MuSigAbsent { musig_stage: Reveal } => + "Signature error: `MuSigAbsent` at stage `Reveal`".into(), + MuSigAbsent { musig_stage: Cosignature } => + "Signature error: `MuSigAbsent` at stage `Commitment`".into(), + MuSigInconsistent { musig_stage: Commitment, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), + MuSigInconsistent { musig_stage: Commitment, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), + MuSigInconsistent { musig_stage: Reveal, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), + MuSigInconsistent { musig_stage: Reveal, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), + MuSigInconsistent { musig_stage: Cosignature, duplicate: true } => + "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), + MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => + "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into(), + } +} + +/// Schnorrkel randomness value. Same size as `VRFOutput`. 
+pub type Randomness = [u8; RANDOMNESS_LENGTH]; diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 4112aff01f55ae6e4ab8b2ee089e94758d748f8c..7708188ea4bf230e40a06814e5ac0773773da173 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-core" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,45 +10,45 @@ description = "Shareable Substrate types." documentation = "https://docs.rs/sp-core" [dependencies] -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -rustc-hex = { version = "2.0.1", default-features = false } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } log = { version = "0.4.8", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } byteorder = { version = "1.3.2", default-features = false } -primitive-types = { version = "0.6.2", default-features = false, features = ["codec"] } +primitive-types = { version = "0.7.0", default-features = false, features = ["codec"] } impl-serde = { version = "0.3.0", optional = true } wasmi = { version = "0.6.2", optional = true } hash-db = { version = "0.15.2", default-features = false } hash256-std-hasher = { version = "0.15.2", default-features = false } base58 = { version = "0.1.0", optional = true } rand = { version = "0.7.2", optional = true } -substrate-bip39 = { version = "0.3.1", optional = true } +substrate-bip39 = { version = "0.4.1", optional = true } tiny-bip39 = { version = "0.7", optional = true } regex = { version = "1.3.1", optional = true } num-traits = { version = "0.2.8", default-features = false } zeroize = { version = "1.0.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.10.0", optional = true } -sp-debug-derive = { version = "2.0.0-alpha.2", path = "../debug-derive" } -sp-externalities = { version = "0.8.0-alpha.2", optional = true, path = "../externalities" } -sp-storage = { version = "2.0.0-alpha.2", default-features = false, path = "../storage" } -parity-util-mem = { version = "0.5.2", default-features = false, features = ["primitive-types"] } +sp-debug-derive = { version = "2.0.0-alpha.5", path = "../debug-derive" } +sp-externalities = { version = "0.8.0-alpha.5", optional = true, path = "../externalities" } +sp-storage = { version = "2.0.0-alpha.5", default-features = false, path = "../storage" } +parity-util-mem = { version = "0.6.0", default-features = false, features = ["primitive-types"] } +futures = { version = "0.3.1", optional = true } # full crypto ed25519-dalek = { version = "1.0.0-pre.3", default-features = false, features = ["u64_backend", "alloc"], optional = true } blake2-rfc = { version = "0.2.18", default-features = false, optional = true } tiny-keccak = { version = "2.0.1", features = ["keccak"], optional = true } -schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated", "u64_backend"], default-features = false, optional = true } +schnorrkel = { version = "0.9.1", features = ["preaudit_deprecated", "u64_backend"], default-features = false, optional = true } sha2 = { version = "0.8.0", default-features = false, optional 
= true } hex = { version = "0.4", default-features = false, optional = true } twox-hash = { version = "1.5.0", default-features = false, optional = true } libsecp256k1 = { version = "0.3.2", default-features = false, features = ["hmac"], optional = true } -sp-runtime-interface = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime-interface" } [dev-dependencies] -sp-serializer = { version = "2.0.0-alpha.2", path = "../serializer" } +sp-serializer = { version = "2.0.0-alpha.5", path = "../serializer" } pretty_assertions = "0.6.1" hex-literal = "0.2.1" rand = "0.7.2" @@ -74,14 +74,12 @@ std = [ "primitive-types/serde", "primitive-types/byteorder", "primitive-types/rustc-hex", - "primitive-types/libc", "impl-serde", "codec/std", "hash256-std-hasher/std", "hash-db/std", "sp-std/std", "serde", - "rustc-hex/std", "twox-hash/std", "blake2-rfc/std", "ed25519-dalek/std", @@ -102,7 +100,9 @@ std = [ "sp-externalities", "sp-storage/std", "sp-runtime-interface/std", - "zeroize/alloc" + "zeroize/alloc", + "futures", + "futures/thread-pool", ] # This feature enables all crypto primitives for `no_std` builds like microcontrollers @@ -119,3 +119,6 @@ full_crypto = [ "libsecp256k1", "sp-runtime-interface/disable_target_static_assertions", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 259c3c5f9a7043425aaccbe708bcea4d3c0df225..79a36b2ad2353887aa8c581de552e34f2fb34d3e 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -18,8 +18,10 @@ //! Cryptographic utilities. // end::description[] +use crate::{sr25519, ed25519}; use sp_std::hash::Hash; use sp_std::vec::Vec; +use sp_std::str; #[cfg(feature = "std")] use sp_std::convert::TryInto; use sp_std::convert::TryFrom; @@ -32,7 +34,8 @@ use codec::{Encode, Decode}; use regex::Regex; #[cfg(feature = "std")] use base58::{FromBase58, ToBase58}; - +#[cfg(feature = "std")] +use crate::hexdisplay::HexDisplay; use zeroize::Zeroize; #[doc(hidden)] pub use sp_std::ops::Deref; @@ -456,6 +459,8 @@ ss58_address_format!( (16, "kulupu", "Kulupu mainnet, standard account (*25519).") DarwiniaAccount => (18, "darwinia", "Darwinia Chain mainnet, standard account (*25519).") + RobonomicsAccount => + (32, "robonomics", "Any Robonomics network standard account (*25519).") CentrifugeAccount => (36, "centrifuge", "Centrifuge Chain mainnet, standard account (*25519).") SubstrateAccount => @@ -538,7 +543,9 @@ impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { } /// Trait suitable for typical cryptographic PKI key public type. -pub trait Public: AsRef<[u8]> + AsMut<[u8]> + Default + Derive + CryptoType + PartialEq + Eq + Clone + Send + Sync { +pub trait Public: + AsRef<[u8]> + AsMut<[u8]> + Default + Derive + CryptoType + PartialEq + Eq + Clone + Send + Sync +{ /// A new instance from the given slice. /// /// NOTE: No checking goes on to ensure this is a real public key. Only use it if @@ -554,6 +561,7 @@ pub trait Public: AsRef<[u8]> + AsMut<[u8]> + Default + Derive + CryptoType + Pa /// An opaque 32-byte cryptographic identifier. 
#[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Default, Encode, Decode)] +#[cfg_attr(feature = "std", derive(Hash))] pub struct AccountId32([u8; 32]); impl UncheckedFrom for AccountId32 { @@ -614,6 +622,18 @@ impl From for [u8; 32] { } } +impl From for AccountId32 { + fn from(k: sr25519::Public) -> Self { + k.0.into() + } +} + +impl From for AccountId32 { + fn from(k: ed25519::Public) -> Self { + k.0.into() + } +} + #[cfg(feature = "std")] impl std::fmt::Display for AccountId32 { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { @@ -941,6 +961,27 @@ impl<'a> TryFrom<&'a str> for KeyTypeId { } } +/// An identifier for a specific cryptographic algorithm used by a key pair +#[derive(Debug, Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] +pub struct CryptoTypeId(pub [u8; 4]); + +/// A type alias of CryptoTypeId & a public key +#[derive(Debug, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode)] +pub struct CryptoTypePublicPair(pub CryptoTypeId, pub Vec); + +#[cfg(feature = "std")] +impl sp_std::fmt::Display for CryptoTypePublicPair { + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let id = match str::from_utf8(&(self.0).0[..]) { + Ok(id) => id.to_string(), + Err(_) => { + format!("{:#?}", self.0) + } + }; + write!(f, "{}-{}", id, HexDisplay::from(&self.1)) + } +} + /// Known key types; this also functions as a global registry of key types for projects wishing to /// avoid collisions with each other. /// @@ -961,6 +1002,8 @@ pub mod key_types { pub const IM_ONLINE: KeyTypeId = KeyTypeId(*b"imon"); /// Key type for AuthorityDiscovery module, built-in. pub const AUTHORITY_DISCOVERY: KeyTypeId = KeyTypeId(*b"audi"); + /// Key type for staking, built-in. + pub const STAKING: KeyTypeId = KeyTypeId(*b"stak"); /// A key type ID useful for tests. pub const DUMMY: KeyTypeId = KeyTypeId(*b"dumy"); } diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index 548ae49b55487d31779a150909456beb74be04c8..8a45157844f3af20ce5ded4ce3dc28f6008a5983 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -36,10 +36,13 @@ use crate::{hashing::blake2_256, crypto::{Pair as TraitPair, DeriveJunction, Sec use crate::crypto::Ss58Codec; #[cfg(feature = "std")] use serde::{de, Serializer, Serialize, Deserializer, Deserialize}; -use crate::crypto::{Public as TraitPublic, UncheckedFrom, CryptoType, Derive}; +use crate::crypto::{Public as TraitPublic, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; #[cfg(feature = "full_crypto")] use secp256k1::{PublicKey, SecretKey}; +/// An identifier used to match public keys against ecdsa keys +pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecds"); + /// A secret seed (which is bytewise essentially equivalent to a SecretKey). /// /// We need it as a different type because `Seed` is expected to be AsRef<[u8]>. 
diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index 5e04dcceffb395fe5433bfcb28a217f8c44f06b5..abeac05388d2773e7ba80f60d6b4e9193ebe2fcd 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -38,10 +38,13 @@ use crate::crypto::{Pair as TraitPair, DeriveJunction, SecretStringError}; use crate::crypto::Ss58Codec; #[cfg(feature = "std")] use serde::{de, Serializer, Serialize, Deserializer, Deserialize}; -use crate::{crypto::{Public as TraitPublic, UncheckedFrom, CryptoType, Derive}}; +use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; use sp_runtime_interface::pass_by::PassByInner; use sp_std::ops::Deref; +/// An identifier used to match public keys against ed25519 keys +pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ed25"); + /// A secret seed. It's not called a "secret key" because ring doesn't expose the secret keys /// of the key pair (yeah, dumb); as such we're forced to remember the seed manually if we /// will need it later (such as for HDKD). @@ -378,6 +381,18 @@ impl TraitPublic for Public { impl Derive for Public {} +impl From for CryptoTypePublicPair { + fn from(key: Public) -> Self { + (&key).into() + } +} + +impl From<&Public> for CryptoTypePublicPair { + fn from(key: &Public) -> Self { + CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) + } +} + /// Derive a single hard junction. #[cfg(feature = "full_crypto")] fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 79721b9b7699177037745276fe7f9983bc61ed5a..8d5ad7daaec83965e26ac78b68219f6e41da80e1 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -70,9 +70,11 @@ mod changes_trie; #[cfg(feature = "std")] pub mod traits; pub mod testing; +#[cfg(feature = "std")] +pub mod tasks; pub use self::hash::{H160, H256, H512, convert_hash}; -pub use self::uint::U256; +pub use self::uint::{U256, U512}; pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index 3495f32872f1694abe6756b643acb3ab7b7d3891..717952eb01c71b7cb398c5c57efc040704e07d24 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -39,7 +39,7 @@ use crate::crypto::{ #[cfg(feature = "std")] use crate::crypto::Ss58Codec; -use crate::{crypto::{Public as TraitPublic, UncheckedFrom, CryptoType, Derive}}; +use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; use crate::hash::{H256, H512}; use codec::{Encode, Decode}; use sp_std::ops::Deref; @@ -54,6 +54,9 @@ use sp_runtime_interface::pass_by::PassByInner; #[cfg(feature = "full_crypto")] const SIGNING_CTX: &[u8] = b"substrate"; +/// An identifier used to match public keys against sr25519 keys +pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"sr25"); + /// An Schnorrkel/Ristretto x25519 ("sr25519") public key. 
#[cfg_attr(feature = "full_crypto", derive(Hash))] #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Encode, Decode, Default, PassByInner)] @@ -390,6 +393,18 @@ impl TraitPublic for Public { } } +impl From for CryptoTypePublicPair { + fn from(key: Public) -> Self { + (&key).into() + } +} + +impl From<&Public> for CryptoTypePublicPair { + fn from(key: &Public) -> Self { + CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) + } +} + #[cfg(feature = "std")] impl From for Pair { fn from(sec: MiniSecretKey) -> Pair { @@ -529,25 +544,24 @@ impl TraitPair for Pair { self.0.sign(context.bytes(message)).into() } - /// Verify a signature on a message. Returns true if the signature is good. fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { Self::verify_weak(&sig.0[..], message, pubkey) } - /// Verify a signature on a message. Returns true if the signature is good. fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - // Match both schnorrkel 0.1.1 and 0.8.0+ signatures, supporting both wallets - // that have not been upgraded and those that have. To swap to 0.8.0 only, - // create `schnorrkel::Signature` and pass that into `verify_simple` - match PublicKey::from_bytes(pubkey.as_ref()) { - Ok(pk) => pk.verify_simple_preaudit_deprecated( - SIGNING_CTX, message.as_ref(), &sig, - ).is_ok(), - Err(_) => false, - } + let signature = match schnorrkel::Signature::from_bytes(sig) { + Ok(signature) => signature, + Err(_) => return false, + }; + + let pub_key = match PublicKey::from_bytes(pubkey.as_ref()) { + Ok(pub_key) => pub_key, + Err(_) => return false, + }; + + pub_key.verify_simple(SIGNING_CTX, message.as_ref(), &signature).is_ok() } - /// Return a vec filled with raw data. fn to_raw_vec(&self) -> Vec { self.0.secret.to_bytes().to_vec() } @@ -566,6 +580,20 @@ impl Pair { let kp = mini_key.expand_to_keypair(ExpansionMode::Ed25519); (Pair(kp), mini_key.to_bytes()) } + + /// Verify a signature on a message. Returns `true` if the signature is good. + /// Supports old 0.1.1 deprecated signatures and should be used only for backward + /// compatibility. + pub fn verify_deprecated>(sig: &Signature, message: M, pubkey: &Public) -> bool { + // Match both schnorrkel 0.1.1 and 0.8.0+ signatures, supporting both wallets + // that have not been upgraded and those that have. + match PublicKey::from_bytes(pubkey.as_ref()) { + Ok(pk) => pk.verify_simple_preaudit_deprecated( + SIGNING_CTX, message.as_ref(), &sig.0[..], + ).is_ok(), + Err(_) => false, + } + } } impl CryptoType for Public { @@ -586,7 +614,7 @@ impl CryptoType for Pair { #[cfg(test)] mod compatibility_test { use super::*; - use crate::crypto::{DEV_PHRASE}; + use crate::crypto::DEV_PHRASE; use hex_literal::hex; // NOTE: tests to ensure addresses that are created with the `0.1.x` version (pre-audit) are @@ -609,14 +637,15 @@ mod compatibility_test { } #[test] - fn verify_known_message_should_work() { + fn verify_known_old_message_should_work() { let public = Public::from_raw(hex!("b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918")); // signature generated by the 1.1 version with the same ^^ public key. let signature = Signature::from_raw(hex!( "5a9755f069939f45d96aaf125cf5ce7ba1db998686f87f2fb3cbdea922078741a73891ba265f70c31436e18a9acd14d189d73c12317ab6c313285cd938453202" )); let message = b"Verifying that I am the owner of 5G9hQLdsKQswNPgB499DeA5PkFBbgkLPJWkkS6FAM6xGQ8xD. 
Hash: 221455a3\n"; - assert!(Pair::verify(&signature, &message[..], &public)); + assert!(Pair::verify_deprecated(&signature, &message[..], &public)); + assert!(!Pair::verify(&signature, &message[..], &public)); } } @@ -776,7 +805,7 @@ mod test { } #[test] - fn verify_from_wasm_works() { + fn verify_from_old_wasm_works() { // The values in this test case are compared to the output of `node-test.js` in schnorrkel-js. // // This is to make sure that the wasm library is compatible. @@ -787,7 +816,8 @@ mod test { let js_signature = Signature::from_raw(hex!( "28a854d54903e056f89581c691c1f7d2ff39f8f896c9e9c22475e60902cc2b3547199e0e91fa32902028f2ca2355e8cdd16cfe19ba5e8b658c94aa80f3b81a00" )); - assert!(Pair::verify(&js_signature, b"SUBSTRATE", &public)); + assert!(Pair::verify_deprecated(&js_signature, b"SUBSTRATE", &public)); + assert!(!Pair::verify(&js_signature, b"SUBSTRATE", &public)); } #[test] diff --git a/primitives/core/src/tasks.rs b/primitives/core/src/tasks.rs new file mode 100644 index 0000000000000000000000000000000000000000..199a185e5371a0efdd272e824d934d07f994d1e3 --- /dev/null +++ b/primitives/core/src/tasks.rs @@ -0,0 +1,56 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Module for low-level asynchronous processing. + +use crate::traits::CloneableSpawn; +use futures::{executor, task}; + +/// Simple task executor. +/// +/// Uses single thread for scheduling tasks. Can be cloned and used in +/// runtime host (implements `CloneableSpawn`). +#[derive(Debug, Clone)] +pub struct Executor { + pool: executor::ThreadPool, +} + +impl Executor { + fn new() -> Self { + Self { + pool: executor::ThreadPool::builder().pool_size(1).create() + .expect("Failed to create task executor") + } + } +} + +impl task::Spawn for Executor { + fn spawn_obj(&self, future: task::FutureObj<'static, ()>) + -> Result<(), task::SpawnError> { + self.pool.spawn_obj(future) + } +} + +impl CloneableSpawn for Executor { + fn clone(&self) -> Box { + Box::new(Clone::clone(self)) + } +} + +/// Create tasks executor. +pub fn executor() -> Box { + Box::new(Executor::new()) +} \ No newline at end of file diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index 247323f9703b41ecf5a22897da4e4b0c6de1fb12..b5e6f4c7aff37427573de40587b9bfe6bf311281 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -16,10 +16,16 @@ //! Types that should only be used for testing! +use crate::crypto::{KeyTypeId, CryptoTypePublicPair}; #[cfg(feature = "std")] -use crate::{ed25519, sr25519, crypto::{Public, Pair}}; -use crate::crypto::KeyTypeId; - +use crate::{ + crypto::{Pair, Public}, + ed25519, sr25519, + traits::BareCryptoStoreError +}; +#[cfg(feature = "std")] +use std::collections::HashSet; +use codec::Encode; /// Key type for generic Ed25519 key. pub const ED25519: KeyTypeId = KeyTypeId(*b"ed25"); /// Key type for generic Sr 25519 key. 
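With the sr25519 change above, `Pair::verify` only accepts schnorrkel 0.9-style signatures (via `verify_simple`), while the permissive pre-audit 0.1.1 path moves to the new `Pair::verify_deprecated`. A rough sketch of a caller that still needs to accept legacy wallet signatures during a transition period; the `check_signature` helper and its fallback policy are assumptions for illustration only:

```rust
use sp_core::{sr25519, Pair};

/// Accept a signature if it verifies under the strict 0.9 rules, and only then
/// fall back to the deprecated pre-audit (schnorrkel 0.1.1) verification.
fn check_signature(sig: &sr25519::Signature, msg: &[u8], public: &sr25519::Public) -> bool {
    // Strict path: what `Pair::verify` does after this change.
    if sr25519::Pair::verify(sig, msg, public) {
        return true;
    }
    // Legacy path for wallets that have not been upgraded yet.
    sr25519::Pair::verify_deprecated(sig, msg, public)
}
```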
@@ -39,10 +45,41 @@ impl KeyStore { pub fn new() -> crate::traits::BareCryptoStorePtr { std::sync::Arc::new(parking_lot::RwLock::new(Self::default())) } + + fn sr25519_key_pair(&self, id: KeyTypeId, pub_key: &sr25519::Public) -> Option { + self.keys.get(&id) + .and_then(|inner| + inner.get(pub_key.as_slice()) + .map(|s| sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid")) + ) + } + + fn ed25519_key_pair(&self, id: KeyTypeId, pub_key: &ed25519::Public) -> Option { + self.keys.get(&id) + .and_then(|inner| + inner.get(pub_key.as_slice()) + .map(|s| ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid")) + ) + } + } #[cfg(feature = "std")] impl crate::traits::BareCryptoStore for KeyStore { + fn keys(&self, id: KeyTypeId) -> Result, BareCryptoStoreError> { + self.keys + .get(&id) + .map(|map| { + Ok(map.keys() + .fold(Vec::new(), |mut v, k| { + v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); + v + })) + }) + .unwrap_or(Ok(vec![])) + } + fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec { self.keys.get(&id) .map(|keys| @@ -58,10 +95,11 @@ impl crate::traits::BareCryptoStore for KeyStore { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result { + ) -> Result { match seed { Some(seed) => { - let pair = sr25519::Pair::from_string(seed, None).expect("Generates an `sr25519` pair."); + let pair = sr25519::Pair::from_string(seed, None) + .map_err(|_| BareCryptoStoreError::ValidationError("Generates an `sr25519` pair.".to_owned()))?; self.keys.entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, @@ -73,14 +111,6 @@ impl crate::traits::BareCryptoStore for KeyStore { } } - fn sr25519_key_pair(&self, id: KeyTypeId, pub_key: &sr25519::Public) -> Option { - self.keys.get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid")) - ) - } - fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec { self.keys.get(&id) .map(|keys| @@ -96,10 +126,11 @@ impl crate::traits::BareCryptoStore for KeyStore { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result { + ) -> Result { match seed { Some(seed) => { - let pair = ed25519::Pair::from_string(seed, None).expect("Generates an `ed25519` pair."); + let pair = ed25519::Pair::from_string(seed, None) + .map_err(|_| BareCryptoStoreError::ValidationError("Generates an `ed25519` pair.".to_owned()))?; self.keys.entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); Ok(pair.public()) }, @@ -111,14 +142,6 @@ impl crate::traits::BareCryptoStore for KeyStore { } } - fn ed25519_key_pair(&self, id: KeyTypeId, pub_key: &ed25519::Public) -> Option { - self.keys.get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid")) - ) - } - fn insert_unknown(&mut self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { self.keys.entry(id).or_default().insert(public.to_owned(), suri.to_string()); Ok(()) @@ -131,6 +154,40 @@ impl crate::traits::BareCryptoStore for KeyStore { fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { public_keys.iter().all(|(k, t)| self.keys.get(&t).and_then(|s| s.get(k)).is_some()) } + + fn supported_keys( + &self, + id: KeyTypeId, + keys: Vec, + ) -> std::result::Result, BareCryptoStoreError> { + let provided_keys = keys.into_iter().collect::>(); + let all_keys = 
self.keys(id)?.into_iter().collect::>(); + + Ok(provided_keys.intersection(&all_keys).cloned().collect()) + } + + fn sign_with( + &self, + id: KeyTypeId, + key: &CryptoTypePublicPair, + msg: &[u8], + ) -> Result, BareCryptoStoreError> { + match key.0 { + ed25519::CRYPTO_ID => { + let key_pair: ed25519::Pair = self + .ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())) + .ok_or(BareCryptoStoreError::PairNotFound("ed25519".to_owned()))?; + return Ok(key_pair.sign(msg).encode()); + } + sr25519::CRYPTO_ID => { + let key_pair: sr25519::Pair = self + .sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())) + .ok_or(BareCryptoStoreError::PairNotFound("sr25519".to_owned()))?; + return Ok(key_pair.sign(msg).encode()); + } + _ => Err(BareCryptoStoreError::KeyNotSupported(id)) + } + } } /// Macro for exporting functions from wasm in with the expected signature for using it with the @@ -247,11 +304,9 @@ mod tests { .ed25519_generate_new(ED25519, None) .expect("Generates key"); - let store_key_pair = store.read() - .ed25519_key_pair(ED25519, &public) - .expect("Key should exists in store"); + let public_keys = store.read().keys(ED25519).unwrap(); - assert_eq!(public, store_key_pair.public()); + assert!(public_keys.contains(&public.into())); } #[test] @@ -267,11 +322,8 @@ mod tests { key_pair.public().as_ref(), ).expect("Inserts unknown key"); - let store_key_pair = store.read().sr25519_key_pair( - SR25519, - &key_pair.public(), - ).expect("Gets key pair from keystore"); + let public_keys = store.read().keys(SR25519).unwrap(); - assert_eq!(key_pair.public(), store_key_pair.public()); + assert!(public_keys.contains(&key_pair.public().into())); } } diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index 83cbebd7d6a92329c208fbadd27d01b451ad5e83..14839fb58562a3ec1bcff7219e6dbfb7e1b47b4d 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -16,14 +16,35 @@ //! Shareable Substrate traits. -use crate::{crypto::KeyTypeId, ed25519, sr25519}; +use crate::{ + crypto::{KeyTypeId, CryptoTypePublicPair}, + ed25519, sr25519, +}; use std::{ - fmt::{Debug, Display}, panic::UnwindSafe, sync::Arc, borrow::Cow, + borrow::Cow, + fmt::{Debug, Display}, + panic::UnwindSafe, + sync::Arc, }; pub use sp_externalities::{Externalities, ExternalitiesExt}; +/// BareCryptoStore error +#[derive(Debug)] +pub enum BareCryptoStoreError { + /// Public key type is not supported + KeyNotSupported(KeyTypeId), + /// Pair not found for public key and KeyTypeId + PairNotFound(String), + /// Validation error + ValidationError(String), + /// Keystore unavailable + Unavailable, + /// Programming errors + Other(String) +} + /// Something that generates, stores and provides access to keys. pub trait BareCryptoStore: Send + Sync { /// Returns all sr25519 public keys for the given key type. @@ -37,10 +58,7 @@ pub trait BareCryptoStore: Send + Sync { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result; - /// Returns the sr25519 key pair for the given key type and public key combination. - fn sr25519_key_pair(&self, id: KeyTypeId, pub_key: &sr25519::Public) -> Option; - + ) -> Result; /// Returns all ed25519 public keys for the given key type. fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec; /// Generate a new ed25519 key pair for the given key type and an optional seed. 
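With the per-scheme `*_key_pair` accessors removed from the public trait, callers now go through `keys()` and `sign_with()`. The following is a hedged usage sketch against the test `KeyStore`; it assumes the `SR25519` test key type and the `From<sr25519::Public> for CryptoTypePublicPair` conversion that this patch uses elsewhere.

```rust
// Sketch: generate a key, list it through the new `keys()` API, and sign via
// `sign_with()`, which returns SCALE-encoded signature bytes.
use sp_core::crypto::{CryptoTypePublicPair, Pair};
use sp_core::sr25519;
use sp_core::testing::{KeyStore, SR25519};
use sp_core::traits::{BareCryptoStore, BareCryptoStoreError};

fn demo() -> Result<(), BareCryptoStoreError> {
    let store = KeyStore::new();

    // Seedless generation now reports failures as `BareCryptoStoreError`.
    let public = store.write().sr25519_generate_new(SR25519, None)?;

    // `keys()` yields (crypto type, raw public key) pairs.
    let listed: Vec<CryptoTypePublicPair> = store.read().keys(SR25519)?;
    assert!(listed.contains(&public.into()));

    // `sign_with()` dispatches on the `CryptoTypeId` embedded in the pair.
    let encoded = store.read().sign_with(SR25519, &public.into(), b"hello")?;
    let signature = sr25519::Signature::from_slice(&encoded);
    assert!(sr25519::Pair::verify(&signature, b"hello", &public));
    Ok(())
}

fn main() {
    demo().expect("keystore demo");
}
```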
@@ -52,10 +70,7 @@ pub trait BareCryptoStore: Send + Sync { &mut self, id: KeyTypeId, seed: Option<&str>, - ) -> Result; - - /// Returns the ed25519 key pair for the given key type and public key combination. - fn ed25519_key_pair(&self, id: KeyTypeId, pub_key: &ed25519::Public) -> Option; + ) -> Result; /// Insert a new key. This doesn't require any known of the crypto; but a public key must be /// manually provided. @@ -67,11 +82,78 @@ pub trait BareCryptoStore: Send + Sync { /// Get the password for this store. fn password(&self) -> Option<&str>; + /// Find intersection between provided keys and supported keys + /// + /// Provided a list of (CryptoTypeId,[u8]) pairs, this would return + /// a filtered set of public keys which are supported by the keystore. + fn supported_keys( + &self, + id: KeyTypeId, + keys: Vec + ) -> Result, BareCryptoStoreError>; + /// List all supported keys + /// + /// Returns a set of public keys the signer supports. + fn keys(&self, id: KeyTypeId) -> Result, BareCryptoStoreError>; /// Checks if the private keys for the given public key and key type combinations exist. /// /// Returns `true` iff all private keys could be found. fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool; + + /// Sign with key + /// + /// Signs a message with the private key that matches + /// the public key passed. + /// + /// Returns the SCALE encoded signature if key is found & supported, + /// an error otherwise. + fn sign_with( + &self, + id: KeyTypeId, + key: &CryptoTypePublicPair, + msg: &[u8], + ) -> Result, BareCryptoStoreError>; + + /// Sign with any key + /// + /// Given a list of public keys, find the first supported key and + /// sign the provided message with that key. + /// + /// Returns a tuple of the used key and the signature + fn sign_with_any( + &self, + id: KeyTypeId, + keys: Vec, + msg: &[u8] + ) -> Result<(CryptoTypePublicPair, Vec), BareCryptoStoreError> { + if keys.len() == 1 { + return self.sign_with(id, &keys[0], msg).map(|s| (keys[0].clone(), s)); + } else { + for k in self.supported_keys(id, keys)? { + if let Ok(sign) = self.sign_with(id, &k, msg) { + return Ok((k, sign)); + } + } + } + Err(BareCryptoStoreError::KeyNotSupported(id)) + } + + /// Sign with all keys + /// + /// Provided a list of public keys, sign a message with + /// each key given that the key is supported. + /// + /// Returns a list of `Result`s each representing the signature of each key or + /// a BareCryptoStoreError for non-supported keys. + fn sign_with_all( + &self, + id: KeyTypeId, + keys: Vec, + msg: &[u8], + ) -> Result, BareCryptoStoreError>>, ()>{ + Ok(keys.iter().map(|k| self.sign_with(id, k, msg)).collect()) + } } /// A pointer to the key store. @@ -212,3 +294,21 @@ impl CallInWasmExt { Self(Box::new(inner)) } } + +/// Something that can spawn tasks and also can be cloned. +pub trait CloneableSpawn: futures::task::Spawn + Send + Sync { + /// Clone as heap-allocated handle. + fn clone(&self) -> Box; +} + +sp_externalities::decl_extension! { + /// Task executor extension. + pub struct TaskExecutorExt(Box); +} + +impl TaskExecutorExt { + /// New instance of task executor extension. + pub fn new(spawn_handle: Box) -> Self { + Self(spawn_handle) + } +} diff --git a/primitives/core/src/uint.rs b/primitives/core/src/uint.rs index 54ed7ca317f001e06dd6c3e7edc0d9fa0029352f..e666137c08161d897707def5fadde55a9d2f1abe 100644 --- a/primitives/core/src/uint.rs +++ b/primitives/core/src/uint.rs @@ -16,7 +16,7 @@ //! An unsigned fixed-size integer. 
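The `sign_with_any` / `sign_with_all` default methods build on `sign_with` so callers do not need to know which concrete key the store holds. A hedged sketch using the test `KeyStore` follows; the wiring and assertions are illustrative, not taken from the patch.

```rust
// Sketch: `sign_with_any` signs with the first supported key from a candidate
// list; `sign_with_all` returns one result per candidate key.
use sp_core::crypto::CryptoTypePublicPair;
use sp_core::testing::{KeyStore, SR25519};
use sp_core::traits::{BareCryptoStore, BareCryptoStoreError};

fn demo() -> Result<(), BareCryptoStoreError> {
    let store = KeyStore::new();
    let a = store.write().sr25519_generate_new(SR25519, None)?;
    let b = store.write().sr25519_generate_new(SR25519, None)?;
    let candidates: Vec<CryptoTypePublicPair> = vec![a.into(), b.into()];

    // Signs with whichever candidate the store supports; the used key is
    // returned alongside the SCALE-encoded signature.
    let (used, signature) =
        store.read().sign_with_any(SR25519, candidates.clone(), b"payload")?;
    assert!(candidates.contains(&used));
    assert!(!signature.is_empty());

    // One `Result` per candidate; both keys are in this store, so both succeed.
    let per_key = store
        .read()
        .sign_with_all(SR25519, candidates, b"payload")
        .expect("outer error type is `()` and never produced here");
    assert!(per_key.iter().all(|r| r.is_ok()));
    Ok(())
}

fn main() {
    demo().expect("keystore demo");
}
```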
-pub use primitive_types::U256; +pub use primitive_types::{U256, U512}; #[cfg(test)] mod tests { diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index 1ffa37308e234b1cfb2c9dd36ff8ebba0e2433ee..0079b6219f5b618c709c322cf5639b9f6eec85f9 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-debug-derive" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -22,3 +22,6 @@ std = [] [dev-dependencies] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index f462b7670e8299af967708cc25419266e70f2301..af6e516fbfcd32eec31dd9d446785a5028a70ddd 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-externalities" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" @@ -10,6 +10,9 @@ description = "Substrate externalities abstraction" documentation = "https://docs.rs/sp-externalities" [dependencies] -sp-storage = { version = "2.0.0-alpha.2", path = "../storage" } -sp-std = { version = "2.0.0-alpha.2", path = "../std" } +sp-storage = { version = "2.0.0-alpha.5", path = "../storage" } +sp-std = { version = "2.0.0-alpha.5", path = "../std" } environmental = { version = "1.1.1" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 6fbd239b89cef8995d2ab05d7192e710fcc87c34..6a7f94394710f3702df3c9d9e3404df667e7b99a 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -39,10 +39,14 @@ pub trait Externalities: ExtensionStore { /// Read runtime storage. fn storage(&self, key: &[u8]) -> Option>; - /// Get storage value hash. This may be optimized for large values. + /// Get storage value hash. + /// + /// This may be optimized for large values. fn storage_hash(&self, key: &[u8]) -> Option>; - /// Get child storage value hash. This may be optimized for large values. + /// Get child storage value hash. + /// + /// This may be optimized for large values. /// /// Returns an `Option` that holds the SCALE encoded hash. fn child_storage_hash( @@ -136,7 +140,7 @@ pub trait Externalities: ExtensionStore { /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). fn place_storage(&mut self, key: Vec, value: Option>); - /// Set or clear a child storage entry. Return whether the operation succeeds. + /// Set or clear a child storage entry. fn place_child_storage( &mut self, storage_key: ChildStorageKey, @@ -148,16 +152,17 @@ pub trait Externalities: ExtensionStore { /// Get the identity of the chain. fn chain_id(&self) -> u64; - /// Get the trie root of the current storage map. This will also update all child storage keys - /// in the top-level storage map. + /// Get the trie root of the current storage map. /// - /// The hash is defined by the `Block`. + /// This will also update all child storage keys in the top-level storage map. /// - /// Returns the SCALE encoded hash. + /// The returned hash is defined by the `Block` and is SCALE encoded. fn storage_root(&mut self) -> Vec; - /// Get the trie root of a child storage map. 
This will also update the value of the child - /// storage keys in the top-level storage map. + /// Get the trie root of a child storage map. + /// + /// This will also update the value of the child storage keys in the top-level storage map. + /// /// If the storage root equals the default hash as defined by the trie, the key in the top-level /// storage map will be removed. fn child_storage_root( @@ -165,12 +170,11 @@ pub trait Externalities: ExtensionStore { storage_key: ChildStorageKey, ) -> Vec; - /// Get the change trie root of the current storage overlay at a block with given parent. - /// `parent` is expects a SCALE encoded hash. + /// Get the changes trie root of the current storage overlay at a block with given `parent`. /// - /// The hash is defined by the `Block`. + /// `parent` expects a SCALE encoded hash. /// - /// Returns the SCALE encoded hash. + /// The returned hash is defined by the `Block` and is SCALE encoded. fn storage_changes_root(&mut self, parent: &[u8]) -> Result>, ()>; /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index d36f0e4b527207ddb716b48aef496b184ca5b05b..0595fa7ba700649fa2c4df5ae1ad3178648fdc1f 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-finality-grandpa" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,12 +11,12 @@ documentation = "https://docs.rs/sp-finality-grandpa" [dependencies] -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } [features] default = ["std"] @@ -28,3 +28,6 @@ std = [ "sp-api/std", "sp-runtime/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/finality-tracker/Cargo.toml b/primitives/finality-tracker/Cargo.toml index f89cb24d4a74cb3dc5871e1c4f64b4d6862d4dfe..4e6cf6c92d324c36278c18ef0b5b5703b21b56e3 100644 --- a/primitives/finality-tracker/Cargo.toml +++ b/primitives/finality-tracker/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-finality-tracker" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,9 +9,9 @@ repository = "https://github.com/paritytech/substrate/" description = "FRAME module that tracks the last finalized block, as perceived by block authors." 
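The clarified `Externalities` docs above describe `storage_root()` as reflecting the current overlay (including child-root propagation) and returning a SCALE-encoded, `Block`-defined hash. A small sketch of that behaviour through the host-function layer, assuming `sp_io::TestExternalities` and the `sp_io::storage` host functions (neither is part of this hunk):

```rust
// Sketch: the storage root changes as soon as the overlay does.
use sp_io::TestExternalities;

fn main() {
    let mut ext = TestExternalities::default();
    ext.execute_with(|| {
        let before = sp_io::storage::root();
        sp_io::storage::set(b"key", b"value");
        let after = sp_io::storage::root();
        // Both values are SCALE-encoded root hashes; they differ once the
        // overlay contains the new entry.
        assert_ne!(before, after);
    });
}
```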
[dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } [features] default = ["std"] @@ -20,3 +20,6 @@ std = [ "sp-std/std", "sp-inherents/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index 839edba73de92a1ae2c13bf2fa10961be3c36d65..dd640f00ec1a272d87511bd2f22f88916abeebbb 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-inherents" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,9 +12,9 @@ documentation = "https://docs.rs/sp-inherents" [dependencies] parking_lot = { version = "0.10.0", optional = true } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../core" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } derive_more = { version = "0.99.2", optional = true } [features] @@ -26,3 +26,6 @@ std = [ "sp-core/std", "derive_more", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index 4f740638024bcaf8974bfc6830f904d47de7271b..9cda2120cc2b45290da425f14f758ba483445b43 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-io" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,16 +11,16 @@ documentation = "https://docs.rs/sp-io" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } hash-db = { version = "0.15.2", default-features = false } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } libsecp256k1 = { version = "0.3.4", optional = true } -sp-state-machine = { version = "0.8.0-alpha.2", optional = true, path = "../../primitives/state-machine" } -sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../primitives/wasm-interface", default-features = false } -sp-runtime-interface = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "2.0.0-alpha.2", 
optional = true, path = "../../primitives/trie" } -sp-externalities = { version = "0.8.0-alpha.2", optional = true, path = "../externalities" } +sp-state-machine = { version = "0.8.0-alpha.5", optional = true, path = "../../primitives/state-machine" } +sp-wasm-interface = { version = "2.0.0-alpha.5", path = "../../primitives/wasm-interface", default-features = false } +sp-runtime-interface = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime-interface" } +sp-trie = { version = "2.0.0-alpha.5", optional = true, path = "../../primitives/trie" } +sp-externalities = { version = "0.8.0-alpha.5", optional = true, path = "../externalities" } log = { version = "0.4.8", optional = true } [features] @@ -46,3 +46,6 @@ std = [ disable_panic_handler = [] disable_oom = [] disable_allocator = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 2702be02071af0def98a5aae5f45e1583df7583a..bc49df159eb6f873d5a9ac7e011a529cc2422cc0 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -286,7 +286,8 @@ pub trait Storage { /// /// The hashing algorithm is defined by the `Block`. /// - /// Returns an `Option` that holds the SCALE encoded hash. + /// Returns an `Some(_)` which holds the SCALE encoded hash or `None` when + /// changes trie is disabled. fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { self.storage_changes_root(parent_hash) .expect("Invalid `parent_hash` given to `changes_root`.") @@ -410,8 +411,9 @@ pub trait Crypto { self.extension::() .expect("No `keystore` associated for the current context!") .read() - .ed25519_key_pair(id, &pub_key) - .map(|k| k.sign(msg)) + .sign_with(id, &pub_key.into(), msg) + .map(|sig| ed25519::Signature::from_slice(sig.as_slice())) + .ok() } /// Verify an `ed25519` signature. @@ -462,13 +464,23 @@ pub trait Crypto { self.extension::() .expect("No `keystore` associated for the current context!") .read() - .sr25519_key_pair(id, &pub_key) - .map(|k| k.sign(msg)) + .sign_with(id, &pub_key.into(), msg) + .map(|sig| sr25519::Signature::from_slice(sig.as_slice())) + .ok() + } + + /// Verify an `sr25519` signature. + /// + /// Returns `true` when the verification in successful regardless of + /// signature version. + fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pubkey: &sr25519::Public) -> bool { + sr25519::Pair::verify_deprecated(sig, msg, pubkey) } /// Verify an `sr25519` signature. /// /// Returns `true` when the verification in successful. 
+ #[version(2)] fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pubkey: &sr25519::Public) -> bool { sr25519::Pair::verify(sig, msg, pubkey) } diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index 1ec4ebe547f360a803dfa2b64e89427add103df0..0764146250446c367c0c0bfbd53147e0a9dd4134 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-keyring" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,7 +11,10 @@ documentation = "https://docs.rs/sp-keyring" [dependencies] -sp-core = { version = "2.0.0-alpha.2", path = "../core" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } +sp-core = { version = "2.0.0-alpha.5", path = "../core" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../runtime" } lazy_static = "1.4.0" strum = { version = "0.16.0", features = ["derive"] } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index 45324d368b94428540b20202d1daf6dc8f20bcae..66febccd59b9b6ac5a9111cfbd4fcb8027e8113a 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -1,7 +1,7 @@ [package] description = "Substrate offchain workers primitives" name = "sp-offchain" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" @@ -9,8 +9,8 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } [features] default = ["std"] @@ -18,3 +18,6 @@ std = [ "sp-api/std", "sp-runtime/std" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index 592107b84f06a4f6d789f27e152b2a7b2c393caf..169443f6c42cee9f40de81baeb04fe5d49a8ba76 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-panic-handler" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,3 +12,6 @@ documentation = "https://docs.rs/sp-panic-handler" [dependencies] backtrace = "0.3.38" log = "0.4.8" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/phragmen/Cargo.toml b/primitives/phragmen/Cargo.toml index 6a599bdabd830a746c17a595c2299dbbfcf7c339..f5d26e8a40b80427360bfd56915a540d84d584bc 100644 --- a/primitives/phragmen/Cargo.toml +++ b/primitives/phragmen/Cargo.toml @@ -1,27 +1,34 @@ [package] name = "sp-phragmen" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" -description = "PHRAGMENT primitives" +description = "Phragmen primitives" [dependencies] +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = 
["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +sp-phragmen-compact = { version = "2.0.0-alpha.4", path = "./compact" } [dev-dependencies] -substrate-test-utils = { version = "2.0.0-alpha.2", path = "../../test-utils" } -sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } -rand = "0.7.2" +substrate-test-utils = { version = "2.0.0-alpha.5", path = "../../test-utils" } +rand = "0.7.3" +sp-phragmen = { path = "." } [features] default = ["std"] +bench = [] std = [ + "codec/std", "serde", "sp-std/std", "sp-runtime/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/phragmen/benches/phragmen.rs b/primitives/phragmen/benches/phragmen.rs index aa99e6f384ba65fd1d4621d3a3e608cb632e1a12..33da0b19563a8d3f7ef226077862558b5e0fd285 100644 --- a/primitives/phragmen/benches/phragmen.rs +++ b/primitives/phragmen/benches/phragmen.rs @@ -16,6 +16,7 @@ //! Note that execution times will not be accurate in an absolute scale, since //! - Everything is executed in the context of `TestExternalities` //! - Everything is executed in native environment. + #![cfg(feature = "bench")] #![feature(test)] diff --git a/primitives/phragmen/compact/Cargo.toml b/primitives/phragmen/compact/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..56b4520c54277825b7dc340b8fef7a9f132ca43a --- /dev/null +++ b/primitives/phragmen/compact/Cargo.toml @@ -0,0 +1,21 @@ +[package] +name = "sp-phragmen-compact" +version = "2.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Phragmen Compact Solution" + +[lib] +proc-macro = true + +[dependencies] +syn = { version = "1.0.7", features = ["full", "visit"] } +quote = "1.0" +proc-macro2 = "1.0.6" +proc-macro-crate = "0.1.4" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/phragmen/compact/src/assignment.rs b/primitives/phragmen/compact/src/assignment.rs new file mode 100644 index 0000000000000000000000000000000000000000..587e482ccb22096182404dd04823c05749d4575b --- /dev/null +++ b/primitives/phragmen/compact/src/assignment.rs @@ -0,0 +1,210 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Code generation for the ratio assignment type. 
+ +use crate::field_name_for; +use proc_macro2::{TokenStream as TokenStream2}; +use syn::{GenericArgument}; +use quote::quote; + +fn from_impl(count: usize) -> TokenStream2 { + let from_impl_single = { + let name = field_name_for(1); + quote!(1 => compact.#name.push( + ( + index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, + index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, + ) + ),) + }; + + let from_impl_double = { + let name = field_name_for(2); + quote!(2 => compact.#name.push( + ( + index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, + ( + index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, + distribution[0].1, + ), + index_of_target(&distribution[1].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, + ) + ),) + }; + + let from_impl_rest = (3..=count).map(|c| { + let inner = (0..c-1).map(|i| + quote!((index_of_target(&distribution[#i].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, distribution[#i].1),) + ).collect::(); + + let field_name = field_name_for(c); + let last_index = c - 1; + let last = quote!(index_of_target(&distribution[#last_index].0).ok_or(_phragmen::Error::CompactInvalidIndex)?); + + quote!( + #c => compact.#field_name.push((index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, [#inner], #last)), + ) + }).collect::(); + + quote!( + #from_impl_single + #from_impl_double + #from_impl_rest + ) +} + +fn into_impl(count: usize) -> TokenStream2 { + let into_impl_single = { + let name = field_name_for(1); + quote!( + for (voter_index, target_index) in self.#name { + assignments.push(_phragmen::Assignment { + who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, + distribution: vec![ + (target_at(target_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, Accuracy::one()) + ], + }) + } + ) + }; + + let into_impl_double = { + let name = field_name_for(2); + quote!( + for (voter_index, (t1_idx, p1), t2_idx) in self.#name { + if p1 >= Accuracy::one() { + return Err(_phragmen::Error::CompactStakeOverflow); + } + + // defensive only. Since Percent doesn't have `Sub`. + let p2 = _phragmen::sp_runtime::traits::Saturating::saturating_sub( + Accuracy::one(), + p1, + ); + + assignments.push( _phragmen::Assignment { + who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, + distribution: vec![ + (target_at(t1_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p1), + (target_at(t2_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p2), + ] + }); + } + ) + }; + + let into_impl_rest = (3..=count).map(|c| { + let name = field_name_for(c); + quote!( + for (voter_index, inners, t_last_idx) in self.#name { + let mut sum = Accuracy::zero(); + let mut inners_parsed = inners + .iter() + .map(|(ref t_idx, p)| { + sum = _phragmen::sp_runtime::traits::Saturating::saturating_add(sum, *p); + let target = target_at(*t_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?; + Ok((target, *p)) + }) + .collect::, _phragmen::Error>>()?; + + if sum >= Accuracy::one() { + return Err(_phragmen::Error::CompactStakeOverflow); + } + + // defensive only. Since Percent doesn't have `Sub`. 
+ let p_last = _phragmen::sp_runtime::traits::Saturating::saturating_sub( + Accuracy::one(), + sum, + ); + + inners_parsed.push((target_at(t_last_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p_last)); + + assignments.push(_phragmen::Assignment { + who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, + distribution: inners_parsed, + }); + } + ) + }).collect::(); + + quote!( + #into_impl_single + #into_impl_double + #into_impl_rest + ) +} + +pub(crate) fn assignment( + ident: syn::Ident, + voter_type: GenericArgument, + target_type: GenericArgument, + count: usize, +) -> TokenStream2 { + + let from_impl = from_impl(count); + let into_impl = into_impl(count); + + quote!( + impl< + #voter_type: _phragmen::codec::Codec + Default + Copy, + #target_type: _phragmen::codec::Codec + Default + Copy, + Accuracy: + _phragmen::codec::Codec + Default + Clone + _phragmen::sp_runtime::PerThing + + PartialOrd, + > + #ident<#voter_type, #target_type, Accuracy> + { + pub fn from_assignment( + assignments: Vec<_phragmen::Assignment>, + index_of_voter: FV, + index_of_target: FT, + ) -> Result + where + for<'r> FV: Fn(&'r A) -> Option<#voter_type>, + for<'r> FT: Fn(&'r A) -> Option<#target_type>, + A: _phragmen::IdentifierT, + { + let mut compact: #ident< + #voter_type, + #target_type, + Accuracy, + > = Default::default(); + + for _phragmen::Assignment { who, distribution } in assignments { + match distribution.len() { + 0 => continue, + #from_impl + _ => { + return Err(_phragmen::Error::CompactTargetOverflow); + } + } + }; + Ok(compact) + } + + pub fn into_assignment( + self, + voter_at: impl Fn(#voter_type) -> Option, + target_at: impl Fn(#target_type) -> Option, + ) -> Result>, _phragmen::Error> { + let mut assignments: Vec<_phragmen::Assignment> = Default::default(); + #into_impl + Ok(assignments) + } + } + ) +} diff --git a/primitives/phragmen/compact/src/lib.rs b/primitives/phragmen/compact/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..114aeaeb32ed92ae4badbb59d38dc77cbcf309e7 --- /dev/null +++ b/primitives/phragmen/compact/src/lib.rs @@ -0,0 +1,219 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Proc macro for phragmen compact assignment. + +use proc_macro::TokenStream; +use proc_macro2::{TokenStream as TokenStream2, Span, Ident}; +use proc_macro_crate::crate_name; +use quote::quote; +use syn::{GenericArgument, Type, parse::{Parse, ParseStream, Result}}; + +mod assignment; +mod staked; + +// prefix used for struct fields in compact. +const PREFIX: &'static str = "votes"; + +/// Generates a struct to store the phragmen assignments in a compact way. The struct can only store +/// distributions up to the given input count. The given count must be greater than 2. +/// +/// ```ignore +/// // generate a struct with nominator and edge weight u128, with maximum supported +/// // edge per voter of 16. 
+/// generate_compact_solution_type(pub TestCompact, 16) +/// ``` +/// +/// This generates: +/// +/// ```ignore +/// pub struct TestCompact { +/// votes1: Vec<(V, T)>, +/// votes2: Vec<(V, (T, W), T)>, +/// votes3: Vec<(V, [(T, W); 2usize], T)>, +/// votes4: Vec<(V, [(T, W); 3usize], T)>, +/// votes5: Vec<(V, [(T, W); 4usize], T)>, +/// votes6: Vec<(V, [(T, W); 5usize], T)>, +/// votes7: Vec<(V, [(T, W); 6usize], T)>, +/// votes8: Vec<(V, [(T, W); 7usize], T)>, +/// votes9: Vec<(V, [(T, W); 8usize], T)>, +/// votes10: Vec<(V, [(T, W); 9usize], T)>, +/// votes11: Vec<(V, [(T, W); 10usize], T)>, +/// votes12: Vec<(V, [(T, W); 11usize], T)>, +/// votes13: Vec<(V, [(T, W); 12usize], T)>, +/// votes14: Vec<(V, [(T, W); 13usize], T)>, +/// votes15: Vec<(V, [(T, W); 14usize], T)>, +/// votes16: Vec<(V, [(T, W); 15usize], T)>, +/// } +/// ``` +/// +/// The generic arguments are: +/// - `V`: identifier/index for voter (nominator) types. +/// - `T` identifier/index for candidate (validator) types. +/// - `W` weight type. +/// +/// Some conversion implementations are provided by default if +/// - `W` is u128, or +/// - `W` is anything that implements `PerThing` (such as `Perbill`) +/// +/// The ideas behind the structure are as follows: +/// +/// - For single distribution, no weight is stored. The weight is known to be 100%. +/// - For all the rest, the weight if the last distribution is omitted. This value can be computed +/// from the rest. +/// +#[proc_macro] +pub fn generate_compact_solution_type(item: TokenStream) -> TokenStream { + let CompactSolutionDef { + vis, + ident, + count, + } = syn::parse_macro_input!(item as CompactSolutionDef); + + let voter_type = GenericArgument::Type(Type::Verbatim(quote!(V))); + let target_type = GenericArgument::Type(Type::Verbatim(quote!(T))); + let weight_type = GenericArgument::Type(Type::Verbatim(quote!(W))); + + let imports = imports().unwrap_or_else(|e| e.to_compile_error()); + + let compact_def = struct_def( + vis, + ident.clone(), + count, + voter_type.clone(), + target_type.clone(), + weight_type, + ).unwrap_or_else(|e| e.to_compile_error()); + + let assignment_impls = assignment::assignment( + ident.clone(), + voter_type.clone(), + target_type.clone(), + count, + ); + + let staked_impls = staked::staked( + ident, + voter_type, + target_type, + count, + ); + + quote!( + #imports + #compact_def + #assignment_impls + #staked_impls + ).into() +} + +fn struct_def( + vis: syn::Visibility, + ident: syn::Ident, + count: usize, + voter_type: GenericArgument, + target_type: GenericArgument, + weight_type: GenericArgument, +) -> Result { + if count <= 2 { + Err(syn::Error::new( + Span::call_site(), + "cannot build compact solution struct with capacity less than 2." + ))? + } + + let singles = { + let name = field_name_for(1); + quote!(#name: Vec<(#voter_type, #target_type)>,) + }; + + let doubles = { + let name = field_name_for(2); + quote!(#name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>,) + }; + + let rest = (3..=count).map(|c| { + let field_name = field_name_for(c); + let array_len = c - 1; + quote!( + #field_name: Vec<( + #voter_type, + [(#target_type, #weight_type); #array_len], + #target_type + )>, + ) + }).collect::(); + + Ok(quote! ( + /// A struct to encode a Phragmen assignment in a compact way. 
+ #[derive( + Default, + PartialEq, + Eq, + Clone, + _phragmen::sp_runtime::RuntimeDebug, + _phragmen::codec::Encode, + _phragmen::codec::Decode, + )] + #vis struct #ident<#voter_type, #target_type, #weight_type> { + // _marker: sp_std::marker::PhantomData, + #singles + #doubles + #rest + } + + impl<#voter_type, #target_type, #weight_type> _phragmen::VotingLimit + for #ident<#voter_type, #target_type, #weight_type> + { + const LIMIT: usize = #count; + } + )) +} + +fn imports() -> Result { + let sp_phragmen_imports = match crate_name("sp-phragmen") { + Ok(sp_phragmen) => { + let ident = syn::Ident::new(&sp_phragmen, Span::call_site()); + quote!( extern crate #ident as _phragmen; ) + } + Err(e) => return Err(syn::Error::new(Span::call_site(), &e)), + }; + + Ok(quote!( + #sp_phragmen_imports + )) +} + +struct CompactSolutionDef { + vis: syn::Visibility, + ident: syn::Ident, + count: usize, +} + +impl Parse for CompactSolutionDef { + fn parse(input: ParseStream) -> syn::Result { + let vis: syn::Visibility = input.parse()?; + let ident: syn::Ident = input.parse()?; + let _ = ::parse(input)?; + let count_literal: syn::LitInt = input.parse()?; + let count = count_literal.base10_parse::()?; + Ok(Self { vis, ident, count } ) + } +} + +fn field_name_for(n: usize) -> Ident { + Ident::new(&format!("{}{}", PREFIX, n), Span::call_site()) +} diff --git a/primitives/phragmen/compact/src/staked.rs b/primitives/phragmen/compact/src/staked.rs new file mode 100644 index 0000000000000000000000000000000000000000..a7cf853f17086651ef4be37dcf2538cdce936dc6 --- /dev/null +++ b/primitives/phragmen/compact/src/staked.rs @@ -0,0 +1,208 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Code generation for the staked assignment type. 
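With the macro entry point in place, a downstream crate would invoke it roughly as sketched below. This is a hedged sketch, not taken from the patch: it assumes the invoking crate depends on `sp-phragmen` (which the generated `_phragmen::…` paths resolve against) and that `sp-phragmen` re-exports `codec` and `sp_runtime` for the derives, as the new Cargo wiring suggests.

```rust
// Sketch: generate a compact assignment type that supports at most three
// targets per voter (counts of two or less are rejected at compile time).
use sp_phragmen_compact::generate_compact_solution_type;

// Expands to roughly:
//   pub struct ExampleCompact<V, T, W> {
//       votes1: Vec<(V, T)>,
//       votes2: Vec<(V, (T, W), T)>,
//       votes3: Vec<(V, [(T, W); 2], T)>,
//   }
// plus the `from_*`/`into_*` conversions generated in this crate.
generate_compact_solution_type!(pub ExampleCompact, 3);

// Typical instantiation: index types for voters/targets and a ratio weight.
pub type IllustrativeCompact = ExampleCompact<u32, u32, sp_runtime::Perbill>;
```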
+ +use crate::field_name_for; +use proc_macro2::{TokenStream as TokenStream2}; +use syn::{GenericArgument}; +use quote::quote; + +fn from_impl(count: usize) -> TokenStream2 { + let from_impl_single = { + let name = field_name_for(1); + quote!(1 => compact.#name.push( + ( + index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, + index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, + ) + ),) + }; + + let from_impl_double = { + let name = field_name_for(2); + quote!(2 => compact.#name.push( + ( + index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, + ( + index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, + distribution[0].1, + ), + index_of_target(&distribution[1].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, + ) + ),) + }; + + let from_impl_rest = (3..=count).map(|c| { + let inner = (0..c-1).map(|i| + quote!((index_of_target(&distribution[#i].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, distribution[#i].1),) + ).collect::(); + + let field_name = field_name_for(c); + let last_index = c - 1; + let last = quote!(index_of_target(&distribution[#last_index].0).ok_or(_phragmen::Error::CompactInvalidIndex)?); + + quote!( + #c => compact.#field_name.push((index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, [#inner], #last)), + ) + }).collect::(); + + quote!( + #from_impl_single + #from_impl_double + #from_impl_rest + ) +} + +fn into_impl(count: usize) -> TokenStream2 { + let into_impl_single = { + let name = field_name_for(1); + quote!( + for (voter_index, target_index) in self.#name { + let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; + let all_stake = max_of(&who); + assignments.push(_phragmen::StakedAssignment { + who, + distribution: vec![(target_at(target_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, all_stake)], + }) + } + ) + }; + + let into_impl_double = { + let name = field_name_for(2); + quote!( + for (voter_index, (t1_idx, w1), t2_idx) in self.#name { + let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; + let all_stake = max_of(&who); + + if w1 >= all_stake { + return Err(_phragmen::Error::CompactStakeOverflow); + } + + // w2 is ensured to be positive. + let w2 = all_stake - w1; + assignments.push( _phragmen::StakedAssignment { + who, + distribution: vec![ + (target_at(t1_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w1), + (target_at(t2_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w2), + ] + }); + } + ) + }; + + let into_impl_rest = (3..=count).map(|c| { + let name = field_name_for(c); + quote!( + for (voter_index, inners, t_last_idx) in self.#name { + let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; + let mut sum = u128::min_value(); + let all_stake = max_of(&who); + + let mut inners_parsed = inners + .iter() + .map(|(ref t_idx, w)| { + sum = sum.saturating_add(*w); + let target = target_at(*t_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?; + Ok((target, *w)) + }).collect::, _phragmen::Error>>()?; + + if sum >= all_stake { + return Err(_phragmen::Error::CompactStakeOverflow); + } + // w_last is proved to be positive. 
+ let w_last = all_stake - sum; + + inners_parsed.push((target_at(t_last_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w_last)); + + assignments.push(_phragmen::StakedAssignment { + who, + distribution: inners_parsed, + }); + } + ) + }).collect::(); + + quote!( + #into_impl_single + #into_impl_double + #into_impl_rest + ) +} + +pub(crate) fn staked( + ident: syn::Ident, + voter_type: GenericArgument, + target_type: GenericArgument, + count: usize, +) -> TokenStream2 { + + let from_impl = from_impl(count); + let into_impl = into_impl(count); + quote!( + impl< + #voter_type: _phragmen::codec::Codec + Default + Copy, + #target_type: _phragmen::codec::Codec + Default + Copy, + > + #ident<#voter_type, #target_type, u128> + { + /// Generate self from a vector of `StakedAssignment`. + pub fn from_staked( + assignments: Vec<_phragmen::StakedAssignment>, + index_of_voter: FV, + index_of_target: FT, + ) -> Result + where + for<'r> FV: Fn(&'r A) -> Option<#voter_type>, + for<'r> FT: Fn(&'r A) -> Option<#target_type>, + A: _phragmen::IdentifierT + { + let mut compact: #ident<#voter_type, #target_type, u128> = Default::default(); + for _phragmen::StakedAssignment { who, distribution } in assignments { + match distribution.len() { + 0 => continue, + #from_impl + _ => { + return Err(_phragmen::Error::CompactTargetOverflow); + } + } + }; + Ok(compact) + } + + /// Convert self into `StakedAssignment`. The given function should return the total + /// weight of a voter. It is used to subtract the sum of all the encoded weights to + /// infer the last one. + pub fn into_staked( + self, + max_of: FM, + voter_at: impl Fn(#voter_type) -> Option, + target_at: impl Fn(#target_type) -> Option, + ) + -> Result>, _phragmen::Error> + where + for<'r> FM: Fn(&'r A) -> u128, + A: _phragmen::IdentifierT, + { + let mut assignments: Vec<_phragmen::StakedAssignment> = Default::default(); + #into_impl + Ok(assignments) + } + } + ) +} diff --git a/primitives/arithmetic/fuzzer/.gitignore b/primitives/phragmen/fuzzer/.gitignore similarity index 100% rename from primitives/arithmetic/fuzzer/.gitignore rename to primitives/phragmen/fuzzer/.gitignore diff --git a/primitives/phragmen/fuzzer/Cargo.lock b/primitives/phragmen/fuzzer/Cargo.lock new file mode 100644 index 0000000000000000000000000000000000000000..3ef2a2732424a3ddd412192dc01ac0647c36ffe1 --- /dev/null +++ b/primitives/phragmen/fuzzer/Cargo.lock @@ -0,0 +1,1602 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
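`into_staked` relies on the same trick for absolute stakes: only the first N-1 weights of a voter's distribution are stored, and the last one is recomputed from the voter's total stake as supplied by `max_of`. A self-contained plain-Rust sketch of that reconstruction (illustrative, not the generated code):

```rust
// Sketch: recover the omitted last weight as `all_stake - sum`, rejecting
// encodings whose stored weights already exhaust the voter's stake.
fn recover_last_weight(all_stake: u128, stored: &[u128]) -> Result<u128, &'static str> {
    let sum = stored.iter().fold(0u128, |acc, w| acc.saturating_add(*w));
    if sum >= all_stake {
        return Err("CompactStakeOverflow");
    }
    // Safe subtraction: `sum < all_stake` was checked above.
    Ok(all_stake - sum)
}

fn main() {
    // A voter with 100 units of stake over three targets; only the first two
    // weights (40 and 35) are encoded, the last (25) is reconstructed.
    assert_eq!(recover_last_weight(100, &[40, 35]), Ok(25));
    // Stored weights that reach the total stake indicate a malformed encoding.
    assert_eq!(recover_last_weight(100, &[60, 40]), Err("CompactStakeOverflow"));
}
```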
+[[package]] +name = "Inflector" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" +dependencies = [ + "lazy_static", + "regex", +] + +[[package]] +name = "ahash" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f33b5018f120946c1dcf279194f238a9f146725593ead1c08fa47ff22b0b5d3" +dependencies = [ + "const-random", +] + +[[package]] +name = "aho-corasick" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8716408b8bc624ed7f65d223ddb9ac2d044c0547b6fa4b0d554f3a9540496ada" +dependencies = [ + "memchr", +] + +[[package]] +name = "arbitrary" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64cf76cb6e2222ed0ea86b2b0ee2f71c96ec6edd5af42e84d59160e91b836ec4" + +[[package]] +name = "arrayref" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" + +[[package]] +name = "arrayvec" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd9fd44efafa8690358b7408d253adf110036b88f55672a933f01d616ad9b1b9" +dependencies = [ + "nodrop", +] + +[[package]] +name = "arrayvec" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cff77d8686867eceff3105329d4698d96c2391c176d5d03adc90c7389162b5b8" + +[[package]] +name = "autocfg" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d49d90015b3c36167a20fe2810c5cd875ad504b39cff3d4eae7977e6b7c1cb2" + +[[package]] +name = "autocfg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" + +[[package]] +name = "backtrace" +version = "0.3.45" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad235dabf00f36301792cfe82499880ba54c6486be094d1047b02bacb67c14e8" +dependencies = [ + "backtrace-sys", + "cfg-if", + "libc", + "rustc-demangle", +] + +[[package]] +name = "backtrace-sys" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca797db0057bae1a7aa2eef3283a874695455cecf08a43bfb8507ee0ebc1ed69" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "base58" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5024ee8015f02155eee35c711107ddd9a9bf3cb689cf2a9089c97e79b6e1ae83" + +[[package]] +name = "bitflags" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf1de2fe8c75bc145a2f577add951f8134889b4795d47466a54a5c846d691693" + +[[package]] +name = "bitvec" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993f74b4c99c1908d156b8d2e0fb6277736b0ecbd833982fd1241d39b2766a6" + +[[package]] +name = "blake2-rfc" +version = "0.2.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" +dependencies = [ + "arrayvec 0.4.12", + "constant_time_eq", +] + +[[package]] +name = "block-buffer" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0940dc441f31689269e10ac70eb1002a3a1d3ad1390e030043662eb7fe4688b" +dependencies = [ + 
"block-padding", + "byte-tools", + "byteorder", + "generic-array", +] + +[[package]] +name = "block-padding" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa79dedbb091f449f1f39e53edf88d5dbe95f895dae6135a8d7b881fb5af73f5" +dependencies = [ + "byte-tools", +] + +[[package]] +name = "byte-slice-cast" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b0a5e3906bcbf133e33c1d4d95afc664ad37fbdb9f6568d8043e7ea8c27d93d3" + +[[package]] +name = "byte-tools" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3b5ca7a04898ad4bcd41c90c5285445ff5b791899bb1b0abdd2a2aa791211d7" + +[[package]] +name = "byteorder" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08c48aae112d48ed9f069b33538ea9e3e90aa263cfa3d1c24309612b1f7472de" + +[[package]] +name = "cc" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95e28fa049fda1c330bcf9d723be7663a899c4679724b34c81e9f5a326aab8cd" + +[[package]] +name = "cfg-if" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + +[[package]] +name = "clear_on_drop" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97276801e127ffb46b66ce23f35cc96bd454fa311294bced4bbace7baa8b1d17" +dependencies = [ + "cc", +] + +[[package]] +name = "cloudabi" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc5b9aa5d4507acaf872de71051dfd0e309860e88966e1051e462a077aac4f" +dependencies = [ + "bitflags", +] + +[[package]] +name = "const-random" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f1af9ac737b2dd2d577701e59fd09ba34822f6f2ebdb30a7647405d9e55e16a" +dependencies = [ + "const-random-macro", + "proc-macro-hack", +] + +[[package]] +name = "const-random-macro" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25e4c606eb459dd29f7c57b2e0879f2b6f14ee130918c2b78ccb58a9624e6c7a" +dependencies = [ + "getrandom", + "proc-macro-hack", +] + +[[package]] +name = "constant_time_eq" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" + +[[package]] +name = "crunchy" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7" + +[[package]] +name = "crypto-mac" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4434400df11d95d556bac068ddfedd482915eb18fe8bea89bc80b6e4b1c179e5" +dependencies = [ + "generic-array", + "subtle 1.0.0", +] + +[[package]] +name = "curve25519-dalek" +version = "1.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b7dcd30ba50cdf88b55b033456138b7c0ac4afdc436d82e1b79f370f24cc66d" +dependencies = [ + "byteorder", + "clear_on_drop", + "digest", + "rand_core 0.3.1", + "subtle 2.2.2", +] + +[[package]] +name = "curve25519-dalek" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26778518a7f6cffa1d25a44b602b62b979bd88adb9e99ffec546998cf3404839" +dependencies = [ + "byteorder", + "digest", + "rand_core 0.5.1", + 
"subtle 2.2.2", + "zeroize 1.1.0", +] + +[[package]] +name = "derive_more" +version = "0.99.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a806e96c59a76a5ba6e18735b6cf833344671e61e7863f2edb5c518ea2cac95c" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "digest" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3d0c8c8752312f9713efd397ff63acb9f85585afbf179282e720e7704954dd5" +dependencies = [ + "generic-array", +] + +[[package]] +name = "ed25519-dalek" +version = "1.0.0-pre.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978710b352437433c97b2bff193f2fb1dfd58a093f863dd95e225a19baa599a2" +dependencies = [ + "clear_on_drop", + "curve25519-dalek 2.0.0", + "rand 0.7.3", + "sha2", +] + +[[package]] +name = "environmental" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "516aa8d7a71cb00a1c4146f0798549b93d083d4f189b3ced8f3de6b8f11ee6c4" + +[[package]] +name = "failure" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" +dependencies = [ + "backtrace", + "failure_derive", +] + +[[package]] +name = "failure_derive" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] + +[[package]] +name = "fake-simd" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e88a8acf291dafb59c2d96e8f59828f3838bb1a70398823ade51a84de6a6deed" + +[[package]] +name = "fixed-hash" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3367952ceb191f4ab95dd5685dc163ac539e36202f9fcfd0cb22f9f9c542fefc" +dependencies = [ + "byteorder", + "libc", + "rand 0.7.3", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "fuchsia-cprng" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" + +[[package]] +name = "generic-array" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c68f0274ae0e023facc3c97b2e00f076be70e254bc851d972503b328db79b2ec" +dependencies = [ + "typenum", +] + +[[package]] +name = "getrandom" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abc8dd8451921606d809ba32e95b6111925cd2906060d2dcc29c070220503eb" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "hash-db" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d23bd4e7b5eda0d0f3a307e8b381fdc8ba9000f26fbe912250c0a4cc3956364a" + +[[package]] +name = "hash256-std-hasher" +version = "0.15.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92c171d55b98633f4ed3860808f004099b36c1cc29c42cfc53aa8591b21efcf2" +dependencies = [ + "crunchy", +] + +[[package]] +name = "hashbrown" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e6073d0ca812575946eb5f35ff68dbe519907b25c42530389ff946dc84c6ead" +dependencies = [ + "ahash", + "autocfg 0.1.7", +] + +[[package]] +name = "hex" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" + +[[package]] +name = "hmac" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5dcb5e64cda4c23119ab41ba960d1e170a774c8e4b9d9e6a9bc18aabf5e59695" +dependencies = [ + "crypto-mac", + "digest", +] + +[[package]] +name = "hmac-drbg" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6e570451493f10f6581b48cdd530413b63ea9e780f544bfd3bdcaa0d89d1a7b" +dependencies = [ + "digest", + "generic-array", + "hmac", +] + +[[package]] +name = "honggfuzz" +version = "0.5.45" +dependencies = [ + "arbitrary", + "lazy_static", + "memmap", +] + +[[package]] +name = "impl-codec" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1be51a921b067b0eaca2fad532d9400041561aa922221cc65f95a85641c6bf53" +dependencies = [ + "parity-scale-codec", +] + +[[package]] +name = "impl-serde" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58e3cae7e99c7ff5a995da2cf78dd0a5383740eda71d98cf7b1910c301ac69b8" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-serde" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bbe9ea9b182f0fb1cabbd61f4ff9b7b7b9197955e95a7e4c27de5055eb29ff8" +dependencies = [ + "serde", +] + +[[package]] +name = "impl-trait-for-tuples" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "integer-sqrt" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f65877bf7d44897a473350b1046277941cee20b263397e90869c50b6e766088b" + +[[package]] +name = "keccak" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67c21572b4949434e4fc1e1978b99c5f77064153c59d998bf13ecd96fb5ecba7" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.67" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" + +[[package]] +name = "libsecp256k1" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc1e2c808481a63dc6da2074752fdd4336a3c8fcc68b83db6f1fd5224ae7962" +dependencies = [ + "arrayref", + "crunchy", + "digest", + "hmac-drbg", + "rand 0.7.3", + "sha2", + "subtle 2.2.2", + "typenum", +] + +[[package]] +name = "lock_api" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79b2de95ecb4691949fea4716ca53cdbcfccb2c612e19644a8bad05edcf9f47b" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "14b6052be84e6b71ab17edffc2eeabf5c2c3ae1fdb464aae35ac50c67a44e1f7" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "maybe-uninit" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" + +[[package]] +name = 
"memchr" +version = "2.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" + +[[package]] +name = "memmap" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6585fd95e7bb50d6cc31e20d4cf9afb4e2ba16c5846fc76793f11218da9c475b" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "memory-db" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "198831fe8722331a395bc199a5d08efbc197497ef354cb4c77b969c02ffc0fc4" +dependencies = [ + "ahash", + "hash-db", + "hashbrown", + "parity-util-mem", +] + +[[package]] +name = "memory_units" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "71d96e3f3c0b6325d8ccd83c33b28acb183edcb6c67938ba104ec546854b0882" + +[[package]] +name = "merlin" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b0942b357c1b4d0dc43ba724674ec89c3218e6ca2b3e8269e7cb53bcecd2f6e" +dependencies = [ + "byteorder", + "keccak", + "rand_core 0.4.2", + "zeroize 1.1.0", +] + +[[package]] +name = "nodrop" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72ef4a56884ca558e5ddb05a1d1e7e1bfd9a68d9ed024c21704cc98872dae1bb" + +[[package]] +name = "num-bigint" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +dependencies = [ + "autocfg 1.0.0", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f6ea62e9d81a77cd3ee9a2a5b9b609447857f3d358704331e4ef39eb247fcba" +dependencies = [ + "autocfg 1.0.0", + "num-traits", +] + +[[package]] +name = "num-rational" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da4dc79f9e6c81bef96148c8f6b8e72ad4541caa4a24373e900a36da07de03a3" +dependencies = [ + "autocfg 1.0.0", + "num-bigint", + "num-integer", + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c62be47e61d1842b9170f0fdeec8eba98e60e90e5446449a0545e5152acd7096" +dependencies = [ + "autocfg 1.0.0", +] + +[[package]] +name = "once_cell" +version = "1.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c601810575c99596d4afc46f78a678c80105117c379eb3650cf99b8a21ce5b" +dependencies = [ + "parking_lot 0.9.0", +] + +[[package]] +name = "opaque-debug" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2839e79665f131bdb5782e51f2c6c9599c133c6098982a54c794358bf432529c" + +[[package]] +name = "parity-scale-codec" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f509c5e67ca0605ee17dcd3f91ef41cadd685c75a298fb6261b781a5acb3f910" +dependencies = [ + "arrayvec 0.5.1", + "bitvec", + "byte-slice-cast", + "parity-scale-codec-derive", + "serde", +] + +[[package]] +name = "parity-scale-codec-derive" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a0ec292e92e8ec7c58e576adacc1e3f399c597c8f263c42f18420abe58e7245" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "parity-util-mem" +version = 
"0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef1476e40bf8f5c6776e9600983435821ca86eb9819d74a6207cca69d091406a" +dependencies = [ + "cfg-if", + "impl-trait-for-tuples", + "parity-util-mem-derive", + "parking_lot 0.10.0", + "primitive-types", + "winapi", +] + +[[package]] +name = "parity-util-mem-derive" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" +dependencies = [ + "proc-macro2", + "syn", + "synstructure", +] + +[[package]] +name = "parity-wasm" +version = "0.41.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddfc878dac00da22f8f61e7af3157988424567ab01d9920b962ef7dcbd7cd865" + +[[package]] +name = "parking_lot" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f842b1982eb6c2fe34036a4fbfb06dd185a3f5c8edfaacdf7d1ea10b07de6252" +dependencies = [ + "lock_api", + "parking_lot_core 0.6.2", + "rustc_version", +] + +[[package]] +name = "parking_lot" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92e98c49ab0b7ce5b222f2cc9193fc4efe11c6d0bd4f648e374684a6857b1cfc" +dependencies = [ + "lock_api", + "parking_lot_core 0.7.0", +] + +[[package]] +name = "parking_lot_core" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b876b1b9e7ac6e1a74a6da34d25c42e17e8862aa409cbbbdcfc8d86c6f3bc62b" +dependencies = [ + "cfg-if", + "cloudabi", + "libc", + "redox_syscall", + "rustc_version", + "smallvec 0.6.13", + "winapi", +] + +[[package]] +name = "parking_lot_core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7582838484df45743c8434fbff785e8edf260c28748353d44bc0da32e0ceabf1" +dependencies = [ + "cfg-if", + "cloudabi", + "libc", + "redox_syscall", + "smallvec 1.3.0", + "winapi", +] + +[[package]] +name = "paste" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e1afe738d71b1ebab5f1207c055054015427dbfc7bbe9ee1266894156ec046" +dependencies = [ + "paste-impl", + "proc-macro-hack", +] + +[[package]] +name = "paste-impl" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d4dc4a7f6f743211c5aab239640a65091535d97d43d92a52bca435a640892bb" +dependencies = [ + "proc-macro-hack", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "pbkdf2" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "006c038a43a45995a9670da19e67600114740e8511d4333bf97a56e66a7542d9" +dependencies = [ + "byteorder", + "crypto-mac", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" + +[[package]] +name = "primitive-types" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e4336f4f5d5524fa60bcbd6fe626f9223d8142a50e7053e979acdf0da41ab975" +dependencies = [ + "fixed-hash", + "impl-codec", + "impl-serde 0.3.0", + "uint", +] + +[[package]] +name = "proc-macro-crate" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10d4b51f154c8a7fb96fd6dad097cb74b863943ec010ac94b9fd1be8861fe1e" +dependencies = [ + "toml", +] + +[[package]] +name = "proc-macro-hack" +version = "0.5.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "proc-macro2" +version = "1.0.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" +dependencies = [ + "unicode-xid", +] + +[[package]] +name = "quote" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rand" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca" +dependencies = [ + "autocfg 0.1.7", + "libc", + "rand_chacha 0.1.1", + "rand_core 0.4.2", + "rand_hc 0.1.0", + "rand_isaac", + "rand_jitter", + "rand_os", + "rand_pcg", + "rand_xorshift", + "winapi", +] + +[[package]] +name = "rand" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" +dependencies = [ + "getrandom", + "libc", + "rand_chacha 0.2.2", + "rand_core 0.5.1", + "rand_hc 0.2.0", +] + +[[package]] +name = "rand_chacha" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "556d3a1ca6600bfcbab7c7c91ccb085ac7fbbcd70e008a98742e7847f4f7bcef" +dependencies = [ + "autocfg 0.1.7", + "rand_core 0.3.1", +] + +[[package]] +name = "rand_chacha" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" +dependencies = [ + "ppv-lite86", + "rand_core 0.5.1", +] + +[[package]] +name = "rand_core" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" +dependencies = [ + "rand_core 0.4.2", +] + +[[package]] +name = "rand_core" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" + +[[package]] +name = "rand_core" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" +dependencies = [ + "getrandom", +] + +[[package]] +name = "rand_hc" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b40677c7be09ae76218dc623efbf7b18e34bced3f38883af07bb75630a21bc4" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rand_hc" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" +dependencies = [ + "rand_core 0.5.1", +] + +[[package]] +name = "rand_isaac" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded997c9d5f13925be2a6fd7e66bf1872597f759fd9dd93513dd7e92e5a5ee08" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rand_jitter" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1166d5c91dc97b88d1decc3285bb0a99ed84b05cfd0bc2341bdf2d43fc41e39b" +dependencies = [ + "libc", + "rand_core 0.4.2", + "winapi", +] + 
+[[package]] +name = "rand_os" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b75f676a1e053fc562eafbb47838d67c84801e38fc1ba459e8f180deabd5071" +dependencies = [ + "cloudabi", + "fuchsia-cprng", + "libc", + "rand_core 0.4.2", + "rdrand", + "winapi", +] + +[[package]] +name = "rand_pcg" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44" +dependencies = [ + "autocfg 0.1.7", + "rand_core 0.4.2", +] + +[[package]] +name = "rand_xorshift" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "rdrand" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" +dependencies = [ + "rand_core 0.3.1", +] + +[[package]] +name = "redox_syscall" +version = "0.1.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2439c63f3f6139d1b57529d16bc3b8bb855230c8efcc5d3a896c8bea7c3b1e84" + +[[package]] +name = "regex" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322cf97724bea3ee221b78fe25ac9c46114ebb51747ad5babd51a2fc6a8235a8" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", + "thread_local", +] + +[[package]] +name = "regex-syntax" +version = "0.6.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1132f845907680735a84409c3bebc64d1364a5683ffbce899550cd09d5eaefc1" + +[[package]] +name = "rustc-demangle" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c691c0e608126e00913e33f0ccf3727d5fc84573623b8d65b2df340b5201783" + +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + +[[package]] +name = "rustc-hex" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" + +[[package]] +name = "rustc_version" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" +dependencies = [ + "semver", +] + +[[package]] +name = "schnorrkel" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eacd8381b3c37840c9c9f40472af529e49975bdcbc24f83c31059fd6539023d3" +dependencies = [ + "curve25519-dalek 1.2.3", + "failure", + "merlin", + "rand 0.6.5", + "rand_core 0.4.2", + "rand_os", + "sha2", + "subtle 2.2.2", + "zeroize 0.9.3", +] + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" + +[[package]] +name = "semver" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" +dependencies = [ + "semver-parser", +] + +[[package]] +name = "semver-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" + +[[package]] +name = "serde" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "414115f25f818d7dfccec8ee535d76949ae78584fc4f79a6f45a904bf8ab4449" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sha2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27044adfd2e1f077f649f59deb9490d3941d674002f7d062870a60ebe9bd47a0" +dependencies = [ + "block-buffer", + "digest", + "fake-simd", + "opaque-debug", +] + +[[package]] +name = "smallvec" +version = "0.6.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7b0758c52e15a8b5e3691eae6cc559f08eee9406e548a4477ba4e67770a82b6" +dependencies = [ + "maybe-uninit", +] + +[[package]] +name = "smallvec" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c2fb2ec9bcd216a5b0d0ccf31ab17b5ed1d627960edff65bbe95d3ce221cefc" + +[[package]] +name = "sp-application-crypto" +version = "2.0.0-alpha.3" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-arithmetic" +version = "2.0.0-alpha.3" +dependencies = [ + "integer-sqrt", + "num-traits", + "parity-scale-codec", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-core" +version = "2.0.0-alpha.3" +dependencies = [ + "base58", + "blake2-rfc", + "byteorder", + "ed25519-dalek", + "hash-db", + "hash256-std-hasher", + "hex", + "impl-serde 0.3.0", + "lazy_static", + "libsecp256k1", + "log", + "num-traits", + "parity-scale-codec", + "parity-util-mem", + "parking_lot 0.10.0", + "primitive-types", + "rand 0.7.3", + "regex", + "rustc-hex", + "schnorrkel", + "serde", + "sha2", + "sp-debug-derive", + "sp-externalities", + "sp-runtime-interface", + "sp-std", + "sp-storage", + "substrate-bip39", + "tiny-bip39", + "tiny-keccak", + "twox-hash", + "wasmi", + "zeroize 1.1.0", +] + +[[package]] +name = "sp-debug-derive" +version = "2.0.0-alpha.3" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-externalities" +version = "0.8.0-alpha.3" +dependencies = [ + "environmental", + "sp-std", + "sp-storage", +] + +[[package]] +name = "sp-inherents" +version = "2.0.0-alpha.3" +dependencies = [ + "derive_more", + "parity-scale-codec", + "parking_lot 0.10.0", + "sp-core", + "sp-std", +] + +[[package]] +name = "sp-io" +version = "2.0.0-alpha.3" +dependencies = [ + "hash-db", + "libsecp256k1", + "log", + "parity-scale-codec", + "sp-core", + "sp-externalities", + "sp-runtime-interface", + "sp-state-machine", + "sp-std", + "sp-trie", + "sp-wasm-interface", +] + +[[package]] +name = "sp-panic-handler" +version = "2.0.0-alpha.3" +dependencies = [ + "backtrace", + "log", +] + +[[package]] +name = "sp-phragmen" +version = "2.0.0-alpha.3" +dependencies = [ + "parity-scale-codec", + "serde", + "sp-core", + "sp-phragmen-compact", + "sp-runtime", + "sp-std", +] + +[[package]] +name = "sp-phragmen-compact" +version = "2.0.0-dev" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-phragmen-fuzzer" +version = "2.0.0" +dependencies = [ + "honggfuzz", + "rand 0.7.3", + 
"sp-phragmen", +] + +[[package]] +name = "sp-runtime" +version = "2.0.0-alpha.3" +dependencies = [ + "hash256-std-hasher", + "impl-trait-for-tuples", + "log", + "parity-scale-codec", + "parity-util-mem", + "paste", + "rand 0.7.3", + "serde", + "sp-application-crypto", + "sp-arithmetic", + "sp-core", + "sp-inherents", + "sp-io", + "sp-std", +] + +[[package]] +name = "sp-runtime-interface" +version = "2.0.0-alpha.3" +dependencies = [ + "parity-scale-codec", + "primitive-types", + "sp-externalities", + "sp-runtime-interface-proc-macro", + "sp-std", + "sp-wasm-interface", + "static_assertions", +] + +[[package]] +name = "sp-runtime-interface-proc-macro" +version = "2.0.0-alpha.3" +dependencies = [ + "Inflector", + "proc-macro-crate", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "sp-state-machine" +version = "0.8.0-alpha.3" +dependencies = [ + "hash-db", + "log", + "num-traits", + "parity-scale-codec", + "parking_lot 0.10.0", + "rand 0.7.3", + "sp-core", + "sp-externalities", + "sp-panic-handler", + "sp-trie", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-std" +version = "2.0.0-alpha.3" + +[[package]] +name = "sp-storage" +version = "2.0.0-alpha.3" +dependencies = [ + "impl-serde 0.2.3", + "serde", + "sp-debug-derive", + "sp-std", +] + +[[package]] +name = "sp-trie" +version = "2.0.0-alpha.3" +dependencies = [ + "hash-db", + "memory-db", + "parity-scale-codec", + "sp-core", + "sp-std", + "trie-db", + "trie-root", +] + +[[package]] +name = "sp-wasm-interface" +version = "2.0.0-alpha.3" +dependencies = [ + "impl-trait-for-tuples", + "parity-scale-codec", + "sp-std", + "wasmi", +] + +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + +[[package]] +name = "substrate-bip39" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3be511be555a3633e71739a79e4ddff6a6aaa6579fa6114182a51d72c3eb93c5" +dependencies = [ + "hmac", + "pbkdf2", + "schnorrkel", + "sha2", +] + +[[package]] +name = "subtle" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d67a5a62ba6e01cb2192ff309324cb4875d0c451d55fe2319433abe7a05a8ee" + +[[package]] +name = "subtle" +version = "2.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" + +[[package]] +name = "syn" +version = "1.0.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" +dependencies = [ + "proc-macro2", + "quote", + "unicode-xid", +] + +[[package]] +name = "synstructure" +version = "0.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "unicode-xid", +] + +[[package]] +name = "thread_local" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d40c6d1b69745a6ec6fb1ca717914848da4b44ae29d9b3080cbee91d72a69b14" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "tiny-bip39" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6848cd8f566953ce1e8faeba12ee23cbdbb0437754792cd857d44628b5685e3" +dependencies = [ + "failure", + "hmac", + "once_cell", + "pbkdf2", + 
"rand 0.7.3", + "rustc-hash", + "sha2", + "unicode-normalization", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2953ca5148619bc99695c1274cb54c5275bbb913c6adad87e72eaf8db9787f69" +dependencies = [ + "crunchy", +] + +[[package]] +name = "toml" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffc92d160b1eef40665be3a05630d003936a3bc7da7421277846c2613e92c71a" +dependencies = [ + "serde", +] + +[[package]] +name = "trie-db" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de9222c50cc325855621271157c973da27a0dcd26fa06f8edf81020bd2333df0" +dependencies = [ + "hash-db", + "hashbrown", + "log", + "rustc-hex", + "smallvec 1.3.0", +] + +[[package]] +name = "trie-root" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "652931506d2c1244d7217a70b99f56718a7b4161b37f04e7cd868072a99f68cd" +dependencies = [ + "hash-db", +] + +[[package]] +name = "twox-hash" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3bfd5b7557925ce778ff9b9ef90e3ade34c524b5ff10e239c69a42d546d2af56" +dependencies = [ + "rand 0.7.3", +] + +[[package]] +name = "typenum" +version = "1.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d2783fe2d6b8c1101136184eb41be8b1ad379e4657050b8aaff0c79ee7575f9" + +[[package]] +name = "uint" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e75a4cdd7b87b28840dba13c483b9a88ee6bbf16ba5c951ee1ecfcf723078e0d" +dependencies = [ + "byteorder", + "crunchy", + "rustc-hex", + "static_assertions", +] + +[[package]] +name = "unicode-normalization" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5479532badd04e128284890390c1e876ef7a993d0570b3597ae43dfa1d59afa4" +dependencies = [ + "smallvec 1.3.0", +] + +[[package]] +name = "unicode-xid" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" + +[[package]] +name = "wasi" +version = "0.9.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" + +[[package]] +name = "wasmi" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf617d864d25af3587aa745529f7aaa541066c876d57e050c0d0c85c61c92aff" +dependencies = [ + "libc", + "memory_units", + "num-rational", + "num-traits", + "parity-wasm", + "wasmi-validation", +] + +[[package]] +name = "wasmi-validation" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea78c597064ba73596099281e2f4cfc019075122a65cdda3205af94f0b264d93" +dependencies = [ + "parity-wasm", +] + +[[package]] +name = "winapi" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8093091eeb260906a183e6ae1abdba2ef5ef2257a21801128899c3fc699229c6" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = 
"winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "zeroize" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45af6a010d13e4cf5b54c94ba5a2b2eba5596b9e46bf5875612d332a1f2b3f86" + +[[package]] +name = "zeroize" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cbac2ed2ba24cc90f5e06485ac8c7c1e5449fe8911aef4d8877218af021a5b8" +dependencies = [ + "zeroize_derive", +] + +[[package]] +name = "zeroize_derive" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" +dependencies = [ + "proc-macro2", + "quote", + "syn", + "synstructure", +] diff --git a/primitives/phragmen/fuzzer/Cargo.toml b/primitives/phragmen/fuzzer/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..90af69a707f75a84b417e2729abf6a245061bb41 --- /dev/null +++ b/primitives/phragmen/fuzzer/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "sp-phragmen-fuzzer" +version = "2.0.0" +authors = ["Parity Technologies "] +edition = "2018" + +[dependencies] +sp-phragmen = { version = "2.0.0-alpha.3", path = ".." } +honggfuzz = "0.5" +rand = "0.7.3" + +[workspace] + +[[bin]] +name = "reduce" +path = "src/reduce.rs" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/phragmen/fuzzer/src/reduce.rs b/primitives/phragmen/fuzzer/src/reduce.rs new file mode 100644 index 0000000000000000000000000000000000000000..4bf08590a149feafae1fc9ab33372c24440377de --- /dev/null +++ b/primitives/phragmen/fuzzer/src/reduce.rs @@ -0,0 +1,145 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! # Running +//! +//! Run with `cargo hfuzz run reduce`. `honggfuzz`. +//! +//! # Debugging a panic +//! +//! Once a panic is found, it can be debugged with +//! `cargo hfuzz run-debug reduce hfuzz_workspace/reduce/*.fuzz`. + +use honggfuzz::fuzz; +use sp_phragmen::{StakedAssignment, ExtendedBalance, build_support_map, reduce}; +use rand::{self, Rng}; + +type Balance = u128; +type AccountId = u64; + +/// Or any other token type. 
+const KSM: Balance = 1_000_000_000_000; + +fn main() { + loop { + fuzz!(|_data: _| { + let (assignments, winners) = generate_random_phragmen_assignment( + rr(100, 1000), + rr(100, 2000), + 8, + 8, + ); + reduce_and_compare(&assignments, &winners); + }); + } +} + +fn generate_random_phragmen_assignment( + voter_count: usize, + target_count: usize, + avg_edge_per_voter: usize, + edge_per_voter_var: usize, +) -> (Vec>, Vec) { + // random in range of (a, b) + let rr_128 = |a: u128, b: u128| -> u128 { rand::thread_rng().gen_range(a, b) }; + + // prefix to distinguish the voter and target account ranges. + let target_prefix = 1_000_000; + // let target_prefix = 1000; + assert!(voter_count < target_prefix); + + let mut assignments = Vec::with_capacity(voter_count as usize); + let mut winners: Vec = Vec::new(); + + let all_targets = (target_prefix..(target_prefix + target_count)) + .map(|a| a as AccountId) + .collect::>(); + + (1..=voter_count).for_each(|acc| { + let mut targets_to_chose_from = all_targets.clone(); + let targets_to_chose = if edge_per_voter_var > 0 { rr( + avg_edge_per_voter - edge_per_voter_var, + avg_edge_per_voter + edge_per_voter_var, + ) } else { avg_edge_per_voter }; + + let distribution = (0..targets_to_chose).map(|_| { + let target = targets_to_chose_from.remove(rr(0, targets_to_chose_from.len())); + if winners.iter().find(|w| **w == target).is_none() { + winners.push(target.clone()); + } + (target, rr_128(1 * KSM, 100 * KSM)) + }).collect::>(); + + assignments.push(StakedAssignment { + who: (acc as AccountId), + distribution, + }); + }); + + (assignments, winners) +} + +fn assert_assignments_equal( + winners: &Vec, + ass1: &Vec>, + ass2: &Vec>, +) { + + let (support_1, _) = build_support_map::(winners, ass1); + let (support_2, _) = build_support_map::(winners, ass2); + + for (who, support) in support_1.iter() { + assert_eq!(support.total, support_2.get(who).unwrap().total); + } +} + +fn reduce_and_compare( + assignment: &Vec>, + winners: &Vec, +) { + let mut altered_assignment = assignment.clone(); + let n = assignment.len() as u32; + let m = winners.len() as u32; + + let edges_before = assignment_len(&assignment); + let num_changed = reduce(&mut altered_assignment); + let edges_after = edges_before - num_changed; + + assert!( + edges_after <= m + n, + "reduce bound not satisfied. n = {}, m = {}, edges after reduce = {} (removed {})", + n, + m, + edges_after, + num_changed, + ); + + assert_assignments_equal( + winners, + &assignment, + &altered_assignment, + ); +} + +fn assignment_len(assignments: &[StakedAssignment]) -> u32 { + let mut counter = 0; + assignments.iter().for_each(|x| x.distribution.iter().for_each(|_| counter += 1)); + counter +} + +fn rr(a: usize, b: usize) -> usize { + rand::thread_rng().gen_range(a, b) +} diff --git a/primitives/phragmen/src/helpers.rs b/primitives/phragmen/src/helpers.rs new file mode 100644 index 0000000000000000000000000000000000000000..27f51b4a05fe2c2c22a02f8442efdd9d9f86124d --- /dev/null +++ b/primitives/phragmen/src/helpers.rs @@ -0,0 +1,94 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Helper methods for phragmen. + +use crate::{Assignment, ExtendedBalance, IdentifierT, StakedAssignment}; +use sp_runtime::PerThing; +use sp_std::prelude::*; + +/// Converts a vector of ratio assignments into ones with absolute budget value. +pub fn assignment_ratio_to_staked( + ratio: Vec>, + stake_of: FS, +) -> Vec> +where + for<'r> FS: Fn(&'r A) -> ExtendedBalance, + T: sp_std::ops::Mul, + ExtendedBalance: From<::Inner>, +{ + ratio + .into_iter() + .map(|a| { + let stake = stake_of(&a.who); + a.into_staked(stake, true) + }) + .collect() +} + +/// Converts a vector of staked assignments into ones with ratio values. +pub fn assignment_staked_to_ratio( + ratio: Vec>, +) -> Vec> +where + ExtendedBalance: From<::Inner>, +{ + ratio.into_iter().map(|a| a.into_assignment(true)).collect() +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::ExtendedBalance; + use sp_runtime::Perbill; + + #[test] + fn into_staked_works() { + let ratio = vec![ + Assignment { + who: 1u32, + distribution: vec![ + (10u32, Perbill::from_fraction(0.5)), + (20, Perbill::from_fraction(0.5)), + ], + }, + Assignment { + who: 2u32, + distribution: vec![ + (10, Perbill::from_fraction(0.33)), + (20, Perbill::from_fraction(0.67)), + ], + }, + ]; + + let stake_of = |_: &u32| -> ExtendedBalance { 100u128 }; + let staked = assignment_ratio_to_staked(ratio, stake_of); + + assert_eq!( + staked, + vec![ + StakedAssignment { + who: 1u32, + distribution: vec![(10u32, 50), (20, 50),] + }, + StakedAssignment { + who: 2u32, + distribution: vec![(10u32, 33), (20, 67),] + } + ] + ); + } +} diff --git a/primitives/phragmen/src/lib.rs b/primitives/phragmen/src/lib.rs index 23acec19da9f83e884ca65e6058760a88dd4b4a8..c0d94a71e1fc209f4db7085bd8fcba0262d66ecd 100644 --- a/primitives/phragmen/src/lib.rs +++ b/primitives/phragmen/src/lib.rs @@ -33,19 +33,60 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{prelude::*, collections::btree_map::BTreeMap, convert::TryFrom}; -use sp_runtime::{ - PerThing, Rational128, RuntimeDebug, - helpers_128bit::multiply_by_rational, -}; -use sp_runtime::traits::{ - Zero, Convert, Member, AtLeast32Bit, SaturatedConversion, Bounded, Saturating, -}; +use sp_std::{prelude::*, collections::btree_map::BTreeMap, fmt::Debug, cmp::Ordering, convert::TryFrom}; +use sp_runtime::{helpers_128bit::multiply_by_rational, PerThing, Rational128, RuntimeDebug, SaturatedConversion}; +use sp_runtime::traits::{Zero, Convert, Member, AtLeast32Bit, Saturating, Bounded}; #[cfg(test)] mod mock; #[cfg(test)] mod tests; +#[cfg(feature = "std")] +use serde::{Serialize, Deserialize}; +#[cfg(feature = "std")] +use codec::{Encode, Decode}; + +mod node; +mod reduce; +mod helpers; + +// re-export reduce stuff. +pub use reduce::reduce; + +// re-export the helpers. +pub use helpers::*; + +// re-export the compact macro, with the dependencies of the macro. +#[doc(hidden)] +pub use codec; +#[doc(hidden)] +pub use sp_runtime; + +// re-export the compact solution type. +pub use sp_phragmen_compact::generate_compact_solution_type; + +/// A trait to limit the number of votes per voter. The generated compact type will implement this. 
+pub trait VotingLimit { + const LIMIT: usize; +} + +/// an aggregator trait for a generic type of a voter/target identifier. This usually maps to +/// substrate's account id. +pub trait IdentifierT: Clone + Eq + Default + Ord + Debug + codec::Codec {} + +impl IdentifierT for T {} + +/// The errors that might occur in the this crate and compact. +#[derive(Debug, Eq, PartialEq)] +pub enum Error { + /// While going from compact to staked, the stake of all the edges has gone above the + /// total and the last stake cannot be assigned. + CompactStakeOverflow, + /// The compact type has a voter who's number of targets is out of bound. + CompactTargetOverflow, + /// One of the index functions returned none. + CompactInvalidIndex, +} /// A type in which performing operations on balances and stakes of candidates and voters are safe. /// @@ -55,6 +96,9 @@ mod tests; /// Balance types converted to `ExtendedBalance` are referred to as `Votes`. pub type ExtendedBalance = u128; +/// The score of an assignment. This can be computed from the support map via [`evaluate_support`]. +pub type PhragmenScore = [ExtendedBalance; 3]; + /// The denominator used for loads. Since votes are collected as u64, the smallest ratio that we /// might collect is `1/approval_stake` where approval stake is the sum of votes. Hence, some number /// bigger than u64::max_value() is needed. For maximum accuracy we simply use u128; @@ -62,11 +106,11 @@ const DEN: u128 = u128::max_value(); /// A candidate entity for phragmen election. #[derive(Clone, Default, RuntimeDebug)] -pub struct Candidate { +struct Candidate { /// Identifier. - pub who: AccountId, + who: AccountId, /// Intermediary value used to sort candidates. - pub score: Rational128, + score: Rational128, /// Sum of the stake of this candidate based on received votes. approval_stake: ExtendedBalance, /// Flag for being elected. @@ -75,7 +119,7 @@ pub struct Candidate { /// A voter entity. #[derive(Clone, Default, RuntimeDebug)] -pub struct Voter { +struct Voter { /// Identifier. who: AccountId, /// List of candidates proposed by this voter. @@ -88,7 +132,7 @@ pub struct Voter { /// A candidate being backed by a voter. #[derive(Clone, Default, RuntimeDebug)] -pub struct Edge { +struct Edge { /// Identifier. who: AccountId, /// Load of this vote. @@ -97,12 +141,6 @@ pub struct Edge { candidate_index: usize, } -/// Particular `AccountId` was backed by `T`-ratio of a nominator's stake. -pub type PhragmenAssignment = (AccountId, T); - -/// Particular `AccountId` was backed by `ExtendedBalance` of a nominator's stake. -pub type PhragmenStakedAssignment = (AccountId, ExtendedBalance); - /// Final result of the phragmen election. #[derive(RuntimeDebug)] pub struct PhragmenResult { @@ -111,7 +149,142 @@ pub struct PhragmenResult { pub winners: Vec<(AccountId, ExtendedBalance)>, /// Individual assignments. for each tuple, the first elements is a voter and the second /// is the list of candidates that it supports. - pub assignments: Vec<(AccountId, Vec>)> + pub assignments: Vec>, +} + +/// A voter's stake assignment among a set of targets, represented as ratios. +#[derive(RuntimeDebug, Clone, Default)] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] +pub struct Assignment { + /// Voter's identifier + pub who: AccountId, + /// The distribution of the voter's stake. + pub distribution: Vec<(AccountId, T)>, +} + +impl Assignment +where + ExtendedBalance: From<::Inner>, +{ + /// Convert from a ratio assignment into one with absolute values aka. 
[`StakedAssignment`]. + /// + /// It needs `stake` which is the total budget of the voter. If `fill` is set to true, + /// it _tries_ to ensure that all the potential rounding errors are compensated and the + /// distribution's sum is exactly equal to the total budget, by adding or subtracting the + /// remainder from the last distribution. + /// + /// If an edge ratio is [`Bounded::max_value()`], it is dropped. This edge can never mean + /// anything useful. + pub fn into_staked(self, stake: ExtendedBalance, fill: bool) -> StakedAssignment + where + T: sp_std::ops::Mul, + { + let mut sum: ExtendedBalance = Bounded::min_value(); + let mut distribution = self + .distribution + .into_iter() + .filter_map(|(target, p)| { + // if this ratio is zero, then skip it. + if p == Bounded::min_value() { + None + } else { + // NOTE: this mul impl will always round to the nearest number, so we might both + // overflow and underflow. + let distribution_stake = p * stake; + // defensive only. We assume that balance cannot exceed extended balance. + sum = sum.saturating_add(distribution_stake); + Some((target, distribution_stake)) + } + }) + .collect::>(); + + if fill { + // NOTE: we can do this better. + // https://revs.runtime-revolution.com/getting-100-with-rounded-percentages-273ffa70252b + if let Some(leftover) = stake.checked_sub(sum) { + if let Some(last) = distribution.last_mut() { + last.1 = last.1.saturating_add(leftover); + } + } else if let Some(excess) = sum.checked_sub(stake) { + if let Some(last) = distribution.last_mut() { + last.1 = last.1.saturating_sub(excess); + } + } + } + + StakedAssignment { + who: self.who, + distribution, + } + } +} + +/// A voter's stake assignment among a set of targets, represented as absolute values in the scale +/// of [`ExtendedBalance`]. +#[derive(RuntimeDebug, Clone, Default)] +#[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] +pub struct StakedAssignment { + /// Voter's identifier + pub who: AccountId, + /// The distribution of the voter's stake. + pub distribution: Vec<(AccountId, ExtendedBalance)>, +} + +impl StakedAssignment { + /// Converts self into the normal [`Assignment`] type. + /// + /// If `fill` is set to true, it _tries_ to ensure that all the potential rounding errors are + /// compensated and the distribution's sum is exactly equal to 100%, by adding or subtracting + /// the remainder from the last distribution. + /// + /// NOTE: it is quite critical that this attempt always works. The data type returned here will + /// potentially get used to create a compact type; a compact type requires sum of ratios to be + /// less than 100% upon un-compacting. + /// + /// If an edge stake is so small that it cannot be represented in `T`, it is ignored. This edge + /// can never be re-created and does not mean anything useful anymore. 
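// A minimal numeric sketch, not taken from the patch, of the `fill`
// compensation described for `Assignment::into_staked` above: three equal
// ratios applied to a budget of 100 each round down to 33, so the 1-unit
// remainder is pushed into the last edge to keep the staked distribution
// summing to the voter's budget. `into_assignment` (below) applies the same
// idea in the other direction, with `T::ACCURACY` as the target sum.
fn main() {
    let budget: u128 = 100;
    // Stand-ins for three equal per-thing ratios (1/3 each), already multiplied
    // by the budget and rounded down.
    let mut staked: Vec<u128> = vec![budget / 3; 3]; // [33, 33, 33]
    let sum: u128 = staked.iter().sum();             // 99
    // Mirror of the `fill` branch: add the rounding leftover to the last edge.
    if let Some(leftover) = budget.checked_sub(sum) {
        if let Some(last) = staked.last_mut() {
            *last = last.saturating_add(leftover);   // [33, 33, 34]
        }
    }
    assert_eq!(staked.iter().sum::<u128>(), budget);
}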
+ pub fn into_assignment(self, fill: bool) -> Assignment + where + ExtendedBalance: From<::Inner>, + { + let accuracy: u128 = T::ACCURACY.saturated_into(); + let mut sum: u128 = Zero::zero(); + let stake = self.distribution.iter().map(|x| x.1).sum(); + let mut distribution = self + .distribution + .into_iter() + .filter_map(|(target, w)| { + let per_thing = T::from_rational_approximation(w, stake); + if per_thing == Bounded::min_value() { + None + } else { + sum += per_thing.clone().deconstruct().saturated_into(); + Some((target, per_thing)) + } + }) + .collect::>(); + + if fill { + if let Some(leftover) = accuracy.checked_sub(sum) { + if let Some(last) = distribution.last_mut() { + last.1 = last.1.saturating_add( + T::from_parts(leftover.saturated_into()) + ); + } + } else if let Some(excess) = sum.checked_sub(accuracy) { + if let Some(last) = distribution.last_mut() { + last.1 = last.1.saturating_sub( + T::from_parts(excess.saturated_into()) + ); + } + } + } + + Assignment { + who: self.who, + distribution, + } + } } /// A structure to demonstrate the phragmen result from the perspective of the candidate, i.e. how @@ -122,12 +295,12 @@ pub struct PhragmenResult { /// This, at the current version, resembles the `Exposure` defined in the Staking pallet, yet /// they do not necessarily have to be the same. #[derive(Default, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize, Eq, PartialEq))] +#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Eq, PartialEq))] pub struct Support { /// Total support. pub total: ExtendedBalance, /// Support from voters. - pub voters: Vec>, + pub voters: Vec<(AccountId, ExtendedBalance)>, } /// A linkage from a candidate and its [`Support`]. @@ -149,16 +322,14 @@ pub type SupportMap = BTreeMap>; /// responsibility of the caller to make sure only those candidates who have a sensible economic /// value are passed in. From the perspective of this function, a candidate can easily be among the /// winner with no backing stake. -pub fn elect( +pub fn elect( candidate_count: usize, minimum_candidate_count: usize, initial_candidates: Vec, - initial_voters: Vec<(AccountId, Vec)>, - stake_of: FS, + initial_voters: Vec<(AccountId, Balance, Vec)>, ) -> Option> where AccountId: Default + Ord + Member, Balance: Default + Copy + AtLeast32Bit, - for<'r> FS: Fn(&'r AccountId) -> Balance, C: Convert + Convert, R: PerThing, { @@ -166,7 +337,7 @@ pub fn elect( // return structures let mut elected_candidates: Vec<(AccountId, ExtendedBalance)>; - let mut assigned: Vec<(AccountId, Vec>)>; + let mut assigned: Vec>; // used to cache and access candidates index. let mut c_idx_cache = BTreeMap::::new(); @@ -191,8 +362,7 @@ pub fn elect( // collect voters. use `c_idx_cache` for fast access and aggregate `approval_stake` of // candidates. - voters.extend(initial_voters.into_iter().map(|(who, votes)| { - let voter_stake = stake_of(&who); + voters.extend(initial_voters.into_iter().map(|(who, voter_stake, votes)| { let mut edges: Vec> = Vec::with_capacity(votes.len()); for v in votes { if let Some(idx) = c_idx_cache.get(&v) { @@ -274,7 +444,10 @@ pub fn elect( // update backing stake of candidates and voters for n in &mut voters { - let mut assignment = (n.who.clone(), vec![]); + let mut assignment = Assignment { + who: n.who.clone(), + ..Default::default() + }; for e in &mut n.edges { if elected_candidates.iter().position(|(ref c, _)| *c == e.who).is_some() { let per_bill_parts: R::Inner = @@ -301,44 +474,45 @@ pub fn elect( // R::Inner. 
.unwrap_or(Bounded::max_value()) } else { - // defensive only. Both edge and nominator loads are built from + // defensive only. Both edge and voter loads are built from // scores, hence MUST have the same denominator. Zero::zero() } } }; let per_thing = R::from_parts(per_bill_parts); - assignment.1.push((e.who.clone(), per_thing)); + assignment.distribution.push((e.who.clone(), per_thing)); } } - if assignment.1.len() > 0 { - // To ensure an assertion indicating: no stake from the nominator going to waste, + let len = assignment.distribution.len(); + if len > 0 { + // To ensure an assertion indicating: no stake from the voter going to waste, // we add a minimal post-processing to equally assign all of the leftover stake ratios. - let vote_count: R::Inner = assignment.1.len().saturated_into(); - let len = assignment.1.len(); - let mut sum: R::Inner = Zero::zero(); - assignment.1.iter().for_each(|a| sum = sum.saturating_add(a.1.deconstruct())); + let vote_count: R::Inner = len.saturated_into(); let accuracy = R::ACCURACY; + let mut sum: R::Inner = Zero::zero(); + assignment.distribution.iter().for_each(|a| sum = sum.saturating_add(a.1.deconstruct())); + let diff = accuracy.saturating_sub(sum); let diff_per_vote = (diff / vote_count).min(accuracy); if !diff_per_vote.is_zero() { for i in 0..len { - let current_ratio = assignment.1[i % len].1; + let current_ratio = assignment.distribution[i % len].1; let next_ratio = current_ratio .saturating_add(R::from_parts(diff_per_vote)); - assignment.1[i % len].1 = next_ratio; + assignment.distribution[i % len].1 = next_ratio; } } - // `remainder` is set to be less than maximum votes of a nominator (currently 16). + // `remainder` is set to be less than maximum votes of a voter (currently 16). // safe to cast it to usize. let remainder = diff - diff_per_vote * vote_count; for i in 0..remainder.saturated_into::() { - let current_ratio = assignment.1[i % len].1; + let current_ratio = assignment.distribution[i % len].1; let next_ratio = current_ratio.saturating_add(R::from_parts(1u8.into())); - assignment.1[i % len].1 = next_ratio; + assignment.distribution[i % len].1 = next_ratio; } assigned.push(assignment); } @@ -350,39 +524,109 @@ pub fn elect( }) } -/// Build the support map from the given phragmen result. -pub fn build_support_map( - elected_stashes: &Vec, - assignments: &Vec<(AccountId, Vec>)>, - stake_of: FS, -) -> SupportMap where +/// Build the support map from the given phragmen result. It maps a flat structure like +/// +/// ```nocompile +/// assignments: vec![ +/// voter1, vec![(candidate1, w11), (candidate2, w12)], +/// voter2, vec![(candidate1, w21), (candidate2, w22)] +/// ] +/// ``` +/// +/// into a mapping of candidates and their respective support: +/// +/// ```nocompile +/// SupportMap { +/// candidate1: Support { +/// own:0, +/// total: w11 + w21, +/// others: vec![(candidate1, w11), (candidate2, w21)] +/// }, +/// candidate2: Support { +/// own:0, +/// total: w12 + w22, +/// others: vec![(candidate1, w12), (candidate2, w22)] +/// }, +/// } +/// ``` +/// +/// The second returned flag indicates the number of edges who didn't corresponded to an actual +/// winner from the given winner set. A value in this place larger than 0 indicates a potentially +/// faulty assignment. +/// +/// `O(E)` where `E` is the total number of edges. 
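// A self-contained sketch, not part of the patch, of the accumulation that the
// doc comment above describes: every staked edge (voter -> target, weight)
// adds its weight to the target's total support, while an edge whose target is
// not in the winner set only bumps the error counter. Only the totals are
// tracked here; the real support map also records the (voter, weight) pairs.
use std::collections::BTreeMap;

fn main() {
    let winners = [10u64, 20u64];
    // Flattened staked edges: (voter, target, weight).
    let edges = [(1u64, 10u64, 60u128), (1, 20, 40), (2, 20, 100), (2, 99, 5)];

    let mut totals: BTreeMap<u64, u128> = winners.iter().map(|w| (*w, 0u128)).collect();
    let mut errors = 0u32;
    for (_voter, target, weight) in edges {
        match totals.get_mut(&target) {
            Some(total) => *total = total.saturating_add(weight),
            None => errors += 1, // 99 is not a winner
        }
    }

    assert_eq!(totals[&10], 60);
    assert_eq!(totals[&20], 140);
    assert_eq!(errors, 1);
}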
+pub fn build_support_map( + winners: &[AccountId], + assignments: &[StakedAssignment], +) -> (SupportMap, u32) where AccountId: Default + Ord + Member, - Balance: Default + Copy + AtLeast32Bit, - C: Convert + Convert, - for<'r> FS: Fn(&'r AccountId) -> Balance, - R: PerThing + sp_std::ops::Mul, { - let to_votes = |b: Balance| >::convert(b) as ExtendedBalance; + let mut errors = 0; // Initialize the support of each candidate. let mut supports = >::new(); - elected_stashes + winners .iter() .for_each(|e| { supports.insert(e.clone(), Default::default()); }); // build support struct. - for (n, assignment) in assignments.iter() { - for (c, per_thing) in assignment.iter() { - let nominator_stake = to_votes(stake_of(n)); - // AUDIT: it is crucially important for the `Mul` implementation of all - // per-things to be sound. - let other_stake = *per_thing * nominator_stake; + for StakedAssignment { who, distribution } in assignments.iter() { + for (c, weight_extended) in distribution.iter() { if let Some(support) = supports.get_mut(c) { - support.voters.push((n.clone(), other_stake)); - support.total = support.total.saturating_add(other_stake); + support.total = support.total.saturating_add(*weight_extended); + support.voters.push((who.clone(), *weight_extended)); + } else { + errors = errors.saturating_add(1); } } } - supports + (supports, errors) +} + +/// Evaluate a phragmen result, given the support map. The returned tuple contains: +/// +/// - Minimum support. This value must be **maximized**. +/// - Sum of all supports. This value must be **maximized**. +/// - Sum of all supports squared. This value must be **minimized**. +/// +/// `O(E)` where `E` is the total number of edges. +pub fn evaluate_support( + support: &SupportMap, +) -> PhragmenScore { + let mut min_support = ExtendedBalance::max_value(); + let mut sum: ExtendedBalance = Zero::zero(); + // NOTE: this will probably saturate but using big num makes it even slower. We'll have to see. + // This must run on chain.. + let mut sum_squared: ExtendedBalance = Zero::zero(); + for (_, support) in support.iter() { + sum += support.total; + let squared = support.total.saturating_mul(support.total); + sum_squared = sum_squared.saturating_add(squared); + if support.total < min_support { + min_support = support.total; + } + } + [min_support, sum, sum_squared] +} + +/// Compares two sets of phragmen scores based on desirability and returns true if `that` is +/// better than `this`. +/// +/// Evaluation is done in a lexicographic manner. +/// +/// Note that the third component should be minimized. +pub fn is_score_better(this: PhragmenScore, that: PhragmenScore) -> bool { + match that + .iter() + .enumerate() + .map(|(i, e)| e.cmp(&this[i])) + .collect::>() + .as_slice() + { + [Ordering::Greater, _, _] => true, + [Ordering::Equal, Ordering::Greater, _] => true, + [Ordering::Equal, Ordering::Equal, Ordering::Less] => true, + _ => false, + } } /// Performs equalize post-processing to the output of the election algorithm. This happens in @@ -391,13 +635,13 @@ pub fn build_support_map( /// /// No value is returned from the function and the `supports` parameter is updated. /// -/// * `assignments`: exactly the same is the output of phragmen. -/// * `supports`: mutable reference to s `SupportMap`. This parameter is updated. -/// * `tolerance`: maximum difference that can occur before an early quite happens. -/// * `iterations`: maximum number of iterations that will be processed. 
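// A self-contained sketch, not part of the patch, of how a `PhragmenScore` is
// read: `evaluate_support` produces [minimal support, sum of supports, sum of
// supports squared], and `is_score_better` compares two scores
// lexicographically, maximizing the first two components and minimizing the
// third. Both helpers below re-implement that logic locally so the snippet
// runs without the crate.
use std::cmp::Ordering::{Equal, Greater, Less};

type Score = [u128; 3];

fn score_of(support_totals: &[u128]) -> Score {
    let min = support_totals.iter().copied().min().unwrap_or(u128::MAX);
    let sum: u128 = support_totals.iter().sum();
    let sum_squared: u128 = support_totals.iter().map(|s| s.saturating_mul(*s)).sum();
    [min, sum, sum_squared]
}

fn better_than(this: Score, that: Score) -> bool {
    match (that[0].cmp(&this[0]), that[1].cmp(&this[1]), that[2].cmp(&this[2])) {
        (Greater, _, _) => true,
        (Equal, Greater, _) => true,
        (Equal, Equal, Less) => true,
        _ => false,
    }
}

fn main() {
    let this = score_of(&[40, 60]);                    // [40, 100, 5200]
    assert!(better_than(this, score_of(&[50, 50])));   // higher minimum wins
    assert!(better_than(this, score_of(&[40, 70])));   // tie on minimum, higher sum wins
    assert!(!better_than(this, score_of(&[30, 200]))); // a lower minimum always loses
}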
-/// * `stake_of`: something that can return the stake stake of a particular candidate or voter. +/// - `assignments`: exactly the same is the output of phragmen. +/// - `supports`: mutable reference to s `SupportMap`. This parameter is updated. +/// - `tolerance`: maximum difference that can occur before an early quite happens. +/// - `iterations`: maximum number of iterations that will be processed. +/// - `stake_of`: something that can return the stake stake of a particular candidate or voter. pub fn equalize( - mut assignments: Vec<(AccountId, Vec>)>, + mut assignments: Vec>, supports: &mut SupportMap, tolerance: ExtendedBalance, iterations: usize, @@ -411,13 +655,13 @@ pub fn equalize( for _i in 0..iterations { let mut max_diff = 0; - for (voter, assignment) in assignments.iter_mut() { - let voter_budget = stake_of(&voter); + for StakedAssignment { who, distribution } in assignments.iter_mut() { + let voter_budget = stake_of(&who); let diff = do_equalize::<_, _, C>( - voter, + who, voter_budget, - assignment, + distribution, supports, tolerance, ); @@ -435,7 +679,7 @@ pub fn equalize( fn do_equalize( voter: &AccountId, budget_balance: Balance, - elected_edges: &mut Vec>, + elected_edges: &mut Vec<(AccountId, ExtendedBalance)>, support_map: &mut SupportMap, tolerance: ExtendedBalance ) -> ExtendedBalance where diff --git a/primitives/phragmen/src/mock.rs b/primitives/phragmen/src/mock.rs index b3110a5dba713d232b881f73f35e96d7493b3fe9..31ce3d38c3500ef930d3676a141beabb8694b275 100644 --- a/primitives/phragmen/src/mock.rs +++ b/primitives/phragmen/src/mock.rs @@ -18,10 +18,10 @@ #![cfg(test)] -use crate::{elect, PhragmenResult, PhragmenAssignment}; +use crate::{elect, PhragmenResult, Assignment}; use sp_runtime::{ - assert_eq_error_rate, Perbill, PerThing, - traits::{Convert, Member, SaturatedConversion} + assert_eq_error_rate, PerThing, + traits::{Convert, Member, SaturatedConversion, Zero, One} }; use sp_std::collections::btree_map::BTreeMap; @@ -320,27 +320,27 @@ pub(crate) fn create_stake_of(stakes: &[(AccountId, Balance)]) } -pub fn check_assignments(assignments: Vec<(AccountId, Vec>)>) { - for (_, a) in assignments { - let sum: u32 = a.iter().map(|(_, p)| p.deconstruct()).sum(); - assert_eq_error_rate!(sum, Perbill::ACCURACY, 5); +pub fn check_assignments_sum(assignments: Vec>) { + for Assignment { distribution, .. } in assignments { + let mut sum: u128 = Zero::zero(); + distribution.iter().for_each(|(_, p)| sum += p.deconstruct().saturated_into()); + assert_eq_error_rate!(sum, T::ACCURACY.saturated_into(), 1); } } -pub(crate) fn run_and_compare( +pub(crate) fn run_and_compare( candidates: Vec, voters: Vec<(AccountId, Vec)>, - stake_of: Box Balance>, + stake_of: &Box Balance>, to_elect: usize, min_to_elect: usize, ) { // run fixed point code. - let PhragmenResult { winners, assignments } = elect::<_, _, _, TestCurrencyToVote, Perbill>( + let PhragmenResult { winners, assignments } = elect::<_, _, TestCurrencyToVote, Output>( to_elect, min_to_elect, candidates.clone(), - voters.clone(), - &stake_of, + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); // run float poc code. 
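// A sketch, not part of the patch, of the control flow that the `equalize` doc
// comment describes: run at most `iterations` passes over the assignments,
// track the largest per-voter adjustment made in each pass, and quit early
// once it falls under `tolerance`. Only the outer loop and the `max_diff`
// tracking are visible in this hunk; the early-quit condition is an assumption
// taken from the doc comment.
fn equalize_like<F: FnMut() -> u128>(iterations: usize, tolerance: u128, mut one_pass: F) -> usize {
    let mut passes_run = 0;
    for _ in 0..iterations {
        passes_run += 1;
        let max_diff = one_pass(); // largest adjustment made across all voters
        if max_diff < tolerance {
            break;
        }
    }
    passes_run
}

fn main() {
    // Pretend every pass halves the remaining imbalance.
    let mut imbalance = 64u128;
    let passes = equalize_like(10, 4, || { imbalance /= 2; imbalance });
    assert_eq!(passes, 5); // changes of 32, 16, 8, 4, then 2, which is below the tolerance
    assert!(imbalance < 4);
}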
@@ -354,14 +354,14 @@ pub(crate) fn run_and_compare( assert_eq!(winners, truth_value.winners); - for (nominator, assigned) in assignments.clone() { - if let Some(float_assignments) = truth_value.assignments.iter().find(|x| x.0 == nominator) { - for (candidate, per_thingy) in assigned { + for Assignment { who, distribution } in assignments.clone() { + if let Some(float_assignments) = truth_value.assignments.iter().find(|x| x.0 == who) { + for (candidate, per_thingy) in distribution { if let Some(float_assignment) = float_assignments.1.iter().find(|x| x.0 == candidate ) { assert_eq_error_rate!( - Perbill::from_fraction(float_assignment.1).deconstruct(), + Output::from_fraction(float_assignment.1).deconstruct(), per_thingy.deconstruct(), - 1, + Output::Inner::one(), ); } else { panic!("candidate mismatch. This should never happen.") @@ -372,7 +372,7 @@ pub(crate) fn run_and_compare( } } - check_assignments(assignments); + check_assignments_sum(assignments); } pub(crate) fn build_support_map_float( diff --git a/primitives/phragmen/src/node.rs b/primitives/phragmen/src/node.rs new file mode 100644 index 0000000000000000000000000000000000000000..92ef325a348724762f3c92f29849963c1cfe4edb --- /dev/null +++ b/primitives/phragmen/src/node.rs @@ -0,0 +1,287 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! (very) Basic implementation of a graph node used in the reduce algorithm. + +use sp_runtime::RuntimeDebug; +use sp_std::{cell::RefCell, fmt, prelude::*, rc::Rc}; + +/// The role that a node can accept. +#[derive(PartialEq, Eq, Ord, PartialOrd, Clone, RuntimeDebug)] +pub(crate) enum NodeRole { + /// A voter. This is synonym to a nominator in a staking context. + Voter, + /// A target. This is synonym to a candidate/validator in a staking context. + Target, +} + +pub(crate) type RefCellOf = Rc>; +pub(crate) type NodeRef = RefCellOf>; + +/// Identifier of a node. This is particularly handy to have a proper `PartialEq` implementation. +/// Otherwise, self votes wouldn't have been indistinguishable. +#[derive(PartialOrd, Ord, Clone, PartialEq, Eq)] +pub(crate) struct NodeId { + /// An account-like identifier representing the node. + pub who: A, + /// The role of the node. + pub role: NodeRole, +} + +impl NodeId { + /// Create a new [`NodeId`]. + pub fn from(who: A, role: NodeRole) -> Self { + Self { who, role } + } +} + +#[cfg(feature = "std")] +impl sp_std::fmt::Debug for NodeId { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> sp_std::fmt::Result { + write!( + f, + "Node({:?}, {:?})", + self.who, + if self.role == NodeRole::Voter { + "V" + } else { + "T" + } + ) + } +} + +/// A one-way graph note. This can only store a pointer to its parent. +#[derive(Clone)] +pub(crate) struct Node { + /// The identifier of the note. + pub(crate) id: NodeId, + /// The parent pointer. 
+    pub(crate) parent: Option>,
+}
+
+impl PartialEq for Node {
+    fn eq(&self, other: &Node) -> bool {
+        self.id == other.id
+    }
+}
+
+impl Eq for Node {}
+
+#[cfg(feature = "std")]
+impl fmt::Debug for Node {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        write!(
+            f,
+            "({:?} --> {:?})",
+            self.id,
+            self.parent.as_ref().map(|p| p.borrow().id.clone())
+        )
+    }
+}
+
+impl Node {
+    /// Create a new [`Node`].
+    pub fn new(id: NodeId) -> Node {
+        Self { id, parent: None }
+    }
+
+    /// Returns true if `other` is the parent of `who`.
+    pub fn is_parent_of(who: &NodeRef, other: &NodeRef) -> bool {
+        if who.borrow().parent.is_none() {
+            return false;
+        }
+        who.borrow().parent.as_ref() == Some(other)
+    }
+
+    /// Removes the parent of `who`.
+    pub fn remove_parent(who: &NodeRef) {
+        who.borrow_mut().parent = None;
+    }
+
+    /// Sets `who`'s parent to be `parent`.
+    pub fn set_parent_of(who: &NodeRef, parent: &NodeRef) {
+        who.borrow_mut().parent = Some(parent.clone());
+    }
+
+    /// Finds the root of `start`. It returns a tuple of `(root, root_vec)` where `root_vec` is the
+    /// vector of Nodes leading to the root. Hence the first element is the start itself and the
+    /// last one is the root. For convenience, the root itself is also returned as the first
+    /// element of the tuple.
+    ///
+    /// This function detects cycles and breaks as soon as a duplicate node is visited, returning
+    /// the cycle up to but not including the duplicate node.
+    ///
+    /// If you are certain that no cycles exist, you can use [`root_unchecked`].
+    pub fn root(start: &NodeRef) -> (NodeRef, Vec>) {
+        let mut parent_path: Vec> = Vec::new();
+        let mut visited: Vec> = Vec::new();
+
+        parent_path.push(start.clone());
+        visited.push(start.clone());
+        let mut current = start.clone();
+
+        while let Some(ref next_parent) = current.clone().borrow().parent {
+            if visited.contains(next_parent) {
+                break;
+            }
+            parent_path.push(next_parent.clone());
+            current = next_parent.clone();
+            visited.push(current.clone());
+        }
+
+        (current, parent_path)
+    }
+
+    /// Consumes self and wraps it in a `Rc>`. This type can be used as the pointer type
+    /// to a parent node.
+ pub fn into_ref(self) -> NodeRef { + Rc::from(RefCell::from(self)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + fn id(i: u32) -> NodeId { + NodeId::from(i, NodeRole::Target) + } + + #[test] + fn basic_create_works() { + let node = Node::new(id(10)); + assert_eq!( + node, + Node { + id: NodeId { + who: 10, + role: NodeRole::Target + }, + parent: None + } + ); + } + + #[test] + fn set_parent_works() { + let a = Node::new(id(10)).into_ref(); + let b = Node::new(id(20)).into_ref(); + + assert_eq!(a.borrow().parent, None); + Node::set_parent_of(&a, &b); + assert_eq!(*a.borrow().parent.as_ref().unwrap(), b); + } + + #[test] + fn get_root_singular() { + let a = Node::new(id(1)).into_ref(); + assert_eq!(Node::root(&a), (a.clone(), vec![a.clone()])); + } + + #[test] + fn get_root_works() { + // D <-- A <-- B <-- C + // \ + // <-- E + let a = Node::new(id(1)).into_ref(); + let b = Node::new(id(2)).into_ref(); + let c = Node::new(id(3)).into_ref(); + let d = Node::new(id(4)).into_ref(); + let e = Node::new(id(5)).into_ref(); + let f = Node::new(id(6)).into_ref(); + + Node::set_parent_of(&c, &b); + Node::set_parent_of(&b, &a); + Node::set_parent_of(&e, &a); + Node::set_parent_of(&a, &d); + + assert_eq!( + Node::root(&e), + (d.clone(), vec![e.clone(), a.clone(), d.clone()]), + ); + + assert_eq!(Node::root(&a), (d.clone(), vec![a.clone(), d.clone()]),); + + assert_eq!( + Node::root(&c), + (d.clone(), vec![c.clone(), b.clone(), a.clone(), d.clone()]), + ); + + // D A <-- B <-- C + // F <-- / \ + // <-- E + Node::set_parent_of(&a, &f); + + assert_eq!(Node::root(&a), (f.clone(), vec![a.clone(), f.clone()]),); + + assert_eq!( + Node::root(&c), + (f.clone(), vec![c.clone(), b.clone(), a.clone(), f.clone()]), + ); + } + + #[test] + fn get_root_on_cycle() { + // A ---> B + // | | + // <---- C + let a = Node::new(id(1)).into_ref(); + let b = Node::new(id(2)).into_ref(); + let c = Node::new(id(3)).into_ref(); + + Node::set_parent_of(&a, &b); + Node::set_parent_of(&b, &c); + Node::set_parent_of(&c, &a); + + let (root, path) = Node::root(&a); + assert_eq!(root, c); + assert_eq!(path.clone(), vec![a.clone(), b.clone(), c.clone()]); + } + + #[test] + fn get_root_on_cycle_2() { + // A ---> B + // | | | + // - C + let a = Node::new(id(1)).into_ref(); + let b = Node::new(id(2)).into_ref(); + let c = Node::new(id(3)).into_ref(); + + Node::set_parent_of(&a, &b); + Node::set_parent_of(&b, &c); + Node::set_parent_of(&c, &b); + + let (root, path) = Node::root(&a); + assert_eq!(root, c); + assert_eq!(path.clone(), vec![a.clone(), b.clone(), c.clone()]); + } + + #[test] + fn node_cmp_stack_overflows_on_non_unique_elements() { + // To make sure we don't stack overflow on duplicate who. This needs manual impl of + // PartialEq. + let a = Node::new(id(1)).into_ref(); + let b = Node::new(id(2)).into_ref(); + let c = Node::new(id(3)).into_ref(); + + Node::set_parent_of(&a, &b); + Node::set_parent_of(&b, &c); + Node::set_parent_of(&c, &a); + + Node::root(&a); + } +} diff --git a/primitives/phragmen/src/reduce.rs b/primitives/phragmen/src/reduce.rs new file mode 100644 index 0000000000000000000000000000000000000000..1f6f6c3b991504f1ccd5f9092acbf80870e9d0a6 --- /dev/null +++ b/primitives/phragmen/src/reduce.rs @@ -0,0 +1,1076 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
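The `Node::root` tests above exercise the parent-pointer walk that the reduce module (beginning below) relies on to detect cycles. Here is a compact standalone sketch of that walk, with nodes modelled as indices into a slice instead of `Rc<RefCell<_>>`; names and data are illustrative only.

```rust
// Follow `parent` links, record the path, and stop as soon as an already-visited
// node shows up so that cycles cannot loop forever.
fn root(start: usize, parent: &[Option<usize>]) -> (usize, Vec<usize>) {
    let mut path = vec![start];
    let mut current = start;
    while let Some(next) = parent[current] {
        // Cycle guard: stop before revisiting a node.
        if path.contains(&next) {
            break;
        }
        path.push(next);
        current = next;
    }
    (current, path)
}

fn main() {
    // 0 -> 1 -> 2, and 3 -> 1 as well (a small forest merged at node 1).
    let parent = vec![Some(1), Some(2), None, Some(1)];
    assert_eq!(root(0, &parent), (2, vec![0, 1, 2]));
    assert_eq!(root(3, &parent), (2, vec![3, 1, 2]));

    // 0 -> 1 -> 2 -> 0 is a cycle; the walk stops at 2 instead of spinning.
    let cyclic = vec![Some(1), Some(2), Some(0)];
    assert_eq!(root(0, &cyclic), (2, vec![0, 1, 2]));
}
```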
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Rust implementation of the Phragmén reduce algorithm. This can be used by any off chain +//! application to reduce cycles from the edge assignment, which will result in smaller size. +//! +//! ### Notions +//! - `m`: size of the committee to elect. +//! - `k`: maximum allowed votes (16 as of this writing). +//! - `nv ∈ E` means that nominator `n ∈ N` supports the election of candidate `v ∈ V`. +//! - A valid solution consists of a tuple `(S, W)` , where `S ⊆ V` is a committee of m validators, +//! and `W : E → R ≥ 0` is an edge weight vector which describes how the budget of each nominator +//! n is fractionally assigned to n 's elected neighbors. +//! - `E_w := { e ∈ E : w_e > 0 }`. +//! +//! ### Algorithm overview +//! +//! > We consider the input edge weight vector `w` as a directed flow over `E_w` , where the flow in +//! > each edge is directed from the nominator to the validator. We build `w′` from `w` by removing +//! > **circulations** to cancel out the flow over as many edges as possible, while preserving flow +//! > demands over all vertices and without reverting the flow direction over any edge. As long as +//! > there is a cycle, we can remove an additional circulation and eliminate at least one new edge +//! > from `E_w′` . This shows that the algorithm always progresses and will eventually finish with +//! > an acyclic edge support. We keep a data structure that represents a forest of rooted trees, +//! > which is initialized as a collection of singletons – one per vertex – and to which edges in +//! > `E_w` are added one by one, causing the trees to merge. Whenever a new edge creates a cycle, +//! > we detect it and destroy it by removing a circulation. We also run a pre-computation which is +//! > designed to detect and remove cycles of length four exclusively. This pre-computation is +//! > optional, and if we skip it then the running time becomes `O (|E_w| ⋅ m), so the +//! > pre-computation makes sense only if `m >> k` and `|E_w| >> m^2`. +//! +//! ### Resources: +//! +//! 1. https://hackmd.io/JOn9x98iS0e0DPWQ87zGWg?view + +use crate::node::{Node, NodeId, NodeRef, NodeRole}; +use crate::{ExtendedBalance, IdentifierT, StakedAssignment}; +use sp_runtime::traits::{Bounded, Zero}; +use sp_std::{ + collections::btree_map::{BTreeMap, Entry::*}, + prelude::*, +}; + +/// Map type used for reduce_4. Can be easily swapped with HashMap. +type Map = BTreeMap<(A, A), A>; + +/// Returns all combinations of size two in the collection `input` with no repetition. +fn combinations_2(input: &[T]) -> Vec<(T, T)> { + let n = input.len(); + if n < 2 { + return Default::default(); + } + + let mut comb = Vec::with_capacity(n * (n - 1) / 2); + for i in 0..n { + for j in i + 1..n { + comb.push((input[i].clone(), input[j].clone())) + } + } + comb +} + +/// Returns the count of trailing common elements in two slices. 
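Both helpers introduced above are small enough to check by hand. The following standalone re-implementation with a couple of sample inputs is for illustration only and is not the crate's code.

```rust
// `combinations_2` yields the n*(n-1)/2 unordered pairs of a slice; `trailing_common`
// counts how many elements two slices share at their tails.
fn combinations_2<T: Clone>(input: &[T]) -> Vec<(T, T)> {
    let mut out = Vec::new();
    for i in 0..input.len() {
        for j in i + 1..input.len() {
            out.push((input[i].clone(), input[j].clone()));
        }
    }
    out
}

fn trailing_common<T: PartialEq>(t1: &[T], t2: &[T]) -> usize {
    t1.iter().rev().zip(t2.iter().rev()).take_while(|(a, b)| a == b).count()
}

fn main() {
    // Three targets produce exactly three unordered pairs.
    assert_eq!(combinations_2(&[10, 20, 30]), vec![(10, 20), (10, 30), (20, 30)]);

    // The two root paths below share a two-element tail (their common ancestry).
    assert_eq!(trailing_common(&[1, 2, 8, 9], &[7, 8, 9]), 2);
    assert_eq!(trailing_common(&[1, 2, 3], &[4, 5, 6]), 0);
}
```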
+pub(crate) fn trailing_common(t1: &[T], t2: &[T]) -> usize { + t1.iter().rev().zip(t2.iter().rev()).take_while(|e| e.0 == e.1).count() +} + +/// Merges two parent roots as described by the reduce algorithm. +fn merge(voter_root_path: Vec>, target_root_path: Vec>) { + let (shorter_path, longer_path) = if voter_root_path.len() <= target_root_path.len() { + (voter_root_path, target_root_path) + } else { + (target_root_path, voter_root_path) + }; + + // iterate from last to beginning, skipping the first one. This asserts that + // indexing is always correct. + shorter_path + .iter() + .zip(shorter_path.iter().skip(1)) + .for_each(|(voter, next)| Node::set_parent_of(&next, &voter)); + Node::set_parent_of(&shorter_path[0], &longer_path[0]); +} + +/// Reduce only redundant edges with cycle length of 4. +/// +/// Returns the number of edges removed. +/// +/// It is strictly assumed that the `who` attribute of all provided assignments are unique. The +/// result will most likely be corrupt otherwise. +/// +/// O(|E_w| ⋅ k). +fn reduce_4(assignments: &mut Vec>) -> u32 { + let mut combination_map: Map = Map::new(); + let mut num_changed: u32 = Zero::zero(); + + // we have to use the old fashioned loops here with manual indexing. Borrowing assignments will + // not work since then there is NO way to mutate it inside. + for assignment_index in 0..assignments.len() { + let who = assignments[assignment_index].who.clone(); + + // all combinations for this particular voter + let distribution_ids = &assignments[assignment_index] + .distribution + .iter() + .map(|(t, _p)| t.clone()) + .collect::>(); + let candidate_combinations = combinations_2(distribution_ids); + + for (v1, v2) in candidate_combinations { + match combination_map.entry((v1.clone(), v2.clone())) { + Vacant(entry) => { + entry.insert(who.clone()); + } + Occupied(mut entry) => { + let other_who = entry.get_mut(); + + // double check if who is still voting for this pair. If not, it means that this + // pair is no longer valid and must have been removed in previous rounds. The + // reason for this is subtle; candidate_combinations is created once while the + // inner loop might remove some edges. Note that if count() > 2, the we have + // duplicates. + if assignments[assignment_index] + .distribution + .iter() + .filter(|(t, _)| *t == v1 || *t == v2) + .count() != 2 + { + continue; + } + + // check if other_who voted for the same pair v1, v2. + let maybe_other_assignments = assignments.iter().find(|a| a.who == *other_who); + if maybe_other_assignments.is_none() { + continue; + } + let other_assignment = + maybe_other_assignments.expect("value is checked to be 'Some'"); + + // Collect potential cycle votes + let mut other_cycle_votes = other_assignment + .distribution + .iter() + .filter_map(|(t, w)| { + if *t == v1 || *t == v2 { + Some((t.clone(), *w)) + } else { + None + } + }) + .collect::>(); + + let other_votes_count = other_cycle_votes.len(); + + // If the length is more than 2, then we have identified duplicates. For now, we + // just skip. Later on we can early exit and stop processing this data since it + // is corrupt anyhow. + debug_assert!(other_votes_count <= 2); + + if other_votes_count < 2 { + // This is not a cycle. Replace and continue. + *other_who = who.clone(); + continue; + } else if other_votes_count == 2 { + // This is a cycle. 
+ let mut who_cycle_votes: Vec<(A, ExtendedBalance)> = Vec::with_capacity(2); + assignments[assignment_index] + .distribution + .iter() + .for_each(|(t, w)| { + if *t == v1 || *t == v2 { + who_cycle_votes.push((t.clone(), *w)); + } + }); + + if who_cycle_votes.len() != 2 { + continue; + } + + // Align the targets similarly. This helps with the circulation below. + if other_cycle_votes[0].0 != who_cycle_votes[0].0 { + other_cycle_votes.swap(0, 1); + } + + // Find min + let mut min_value: ExtendedBalance = Bounded::max_value(); + let mut min_index: usize = 0; + let cycle = who_cycle_votes + .iter() + .chain(other_cycle_votes.iter()) + .enumerate() + .map(|(index, (t, w))| { + if *w <= min_value { + min_value = *w; + min_index = index; + } + (t.clone(), *w) + }) + .collect::>(); + + // min was in the first part of the chained iters + let mut increase_indices: Vec = Vec::new(); + let mut decrease_indices: Vec = Vec::new(); + decrease_indices.push(min_index); + if min_index < 2 { + // min_index == 0 => sibling_index <- 1 + // min_index == 1 => sibling_index <- 0 + let sibling_index = 1 - min_index; + increase_indices.push(sibling_index); + // valid because the two chained sections of `cycle` are aligned; + // index [0, 2] are both voting for v1 or both v2. Same goes for [1, 3]. + decrease_indices.push(sibling_index + 2); + increase_indices.push(min_index + 2); + } else { + // min_index == 2 => sibling_index <- 3 + // min_index == 3 => sibling_index <- 2 + let sibling_index = 3 - min_index % 2; + increase_indices.push(sibling_index); + // valid because the two chained sections of `cycle` are aligned; + // index [0, 2] are both voting for v1 or both v2. Same goes for [1, 3]. + decrease_indices.push(sibling_index - 2); + increase_indices.push(min_index - 2); + } + + // apply changes + let mut remove_indices: Vec = Vec::with_capacity(1); + increase_indices.into_iter().for_each(|i| { + let voter = if i < 2 { + who.clone() + } else { + other_who.clone() + }; + // Note: so this is pretty ambiguous. We should only look for one + // assignment that meets this criteria and if we find multiple then that + // is a corrupt input. Same goes for the next block. + assignments + .iter_mut() + .filter(|a| a.who == voter) + .for_each(|ass| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == cycle[i].0) + .map(|idx| { + let next_value = + ass.distribution[idx].1.saturating_add(min_value); + ass.distribution[idx].1 = next_value; + }); + }); + }); + decrease_indices.into_iter().for_each(|i| { + let voter = if i < 2 { + who.clone() + } else { + other_who.clone() + }; + assignments + .iter_mut() + .filter(|a| a.who == voter) + .for_each(|ass| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == cycle[i].0) + .map(|idx| { + let next_value = + ass.distribution[idx].1.saturating_sub(min_value); + if next_value.is_zero() { + ass.distribution.remove(idx); + remove_indices.push(i); + num_changed += 1; + } else { + ass.distribution[idx].1 = next_value; + } + }); + }); + }); + + // remove either one of them. + let who_removed = remove_indices.iter().find(|i| **i < 2usize).is_some(); + let other_removed = + remove_indices.into_iter().find(|i| *i >= 2usize).is_some(); + + match (who_removed, other_removed) { + (false, true) => { + *other_who = who.clone(); + } + (true, false) => { + // nothing, other_who can stay there. + } + (true, true) => { + // remove and don't replace + entry.remove(); + } + (false, false) => { + // Neither of the edges was removed? impossible. 
+ panic!("Duplicate voter (or other corrupt input)."); + } + } + } + } + } + } + } + + num_changed +} + +/// Reduce redundant edges from the edge weight graph, with all possible length. +/// +/// To get the best performance, this should be called after `reduce_4()`. +/// +/// Returns the number of edges removed. +/// +/// It is strictly assumed that the `who` attribute of all provided assignments are unique. The +/// result will most likely be corrupt otherwise. +/// +/// O(|Ew| ⋅ m) +fn reduce_all(assignments: &mut Vec>) -> u32 { + let mut num_changed: u32 = Zero::zero(); + let mut tree: BTreeMap, NodeRef> = BTreeMap::new(); + + // NOTE: This code can heavily use an index cache. Looking up a pair of (voter, target) in the + // assignments happens numerous times and and we can save time. For now it is written as such + // because abstracting some of this code into a function/closure is super hard due to borrow + // checks (and most likely needs unsafe code at the end). For now I will keep it as it and + // refactor later. + + // a flat iterator of (voter, target) over all pairs of votes. Similar to reduce_4, we loop + // without borrowing. + for assignment_index in 0..assignments.len() { + let voter = assignments[assignment_index].who.clone(); + + let mut dist_index = 0; + loop { + // A distribution could have been removed. We don't know for sure. Hence, we check. + let maybe_dist = assignments[assignment_index].distribution.get(dist_index); + if maybe_dist.is_none() { + // The rest of this loop is moot. + break; + } + let (target, _) = maybe_dist.expect("Value checked to be some").clone(); + + // store if they existed already. + let voter_id = NodeId::from(voter.clone(), NodeRole::Voter); + let target_id = NodeId::from(target.clone(), NodeRole::Target); + let voter_exists = tree.contains_key(&voter_id); + let target_exists = tree.contains_key(&target_id); + + // create both. + let voter_node = tree + .entry(voter_id.clone()) + .or_insert(Node::new(voter_id).into_ref()) + .clone(); + let target_node = tree + .entry(target_id.clone()) + .or_insert(Node::new(target_id).into_ref()) + .clone(); + + // If one exists but the other one doesn't, or if both does not, then set the existing + // one as the parent of the non-existing one and move on. Else, continue with the rest + // of the code. + match (voter_exists, target_exists) { + (false, false) => { + Node::set_parent_of(&target_node, &voter_node); + dist_index += 1; + continue; + } + (false, true) => { + Node::set_parent_of(&voter_node, &target_node); + dist_index += 1; + continue; + } + (true, false) => { + Node::set_parent_of(&target_node, &voter_node); + dist_index += 1; + continue; + } + (true, true) => { /* don't continue and execute the rest */ } + }; + + let (voter_root, voter_root_path) = Node::root(&voter_node); + let (target_root, target_root_path) = Node::root(&target_node); + + if voter_root != target_root { + // swap + merge(voter_root_path, target_root_path); + dist_index += 1; + } else { + // find common and cycle. + let common_count = trailing_common(&voter_root_path, &target_root_path); + + // because roots are the same. + #[cfg(feature = "std")] + debug_assert_eq!( + target_root_path.last().unwrap(), + voter_root_path.last().unwrap() + ); + debug_assert!(common_count > 0); + + // cycle part of each path will be `path[path.len() - common_count - 1 : 0]` + // NOTE: the order of chaining is important! 
it is always build from [target, ..., + // voter] + let cycle = target_root_path + .iter() + .take(target_root_path.len() - common_count + 1) + .cloned() + .chain( + voter_root_path + .iter() + .take(voter_root_path.len() - common_count) + .rev() + .cloned(), + ) + .collect::>>(); + + // a cycle's length shall always be multiple of two. + #[cfg(feature = "std")] + debug_assert_eq!(cycle.len() % 2, 0); + + // find minimum of cycle. + let mut min_value: ExtendedBalance = Bounded::max_value(); + // The voter and the target pair that create the min edge. + let mut min_target: A = Default::default(); + let mut min_voter: A = Default::default(); + // The index of the min in opaque cycle list. + let mut min_index = 0usize; + // 1 -> next // 0 -> prev + let mut min_direction = 0u32; + // helpers + let next_index = |i| { + if i < (cycle.len() - 1) { + i + 1 + } else { + 0 + } + }; + let prev_index = |i| { + if i > 0 { + i - 1 + } else { + cycle.len() - 1 + } + }; + for i in 0..cycle.len() { + if cycle[i].borrow().id.role == NodeRole::Voter { + // NOTE: sadly way too many clones since I don't want to make A: Copy + let current = cycle[i].borrow().id.who.clone(); + let next = cycle[next_index(i)].borrow().id.who.clone(); + let prev = cycle[prev_index(i)].borrow().id.who.clone(); + assignments.iter().find(|a| a.who == current).map(|ass| { + ass.distribution.iter().find(|d| d.0 == next).map(|(_, w)| { + if *w < min_value { + min_value = *w; + min_target = next.clone(); + min_voter = current.clone(); + min_index = i; + min_direction = 1; + } + }) + }); + assignments.iter().find(|a| a.who == current).map(|ass| { + ass.distribution.iter().find(|d| d.0 == prev).map(|(_, w)| { + if *w < min_value { + min_value = *w; + min_target = prev.clone(); + min_voter = current.clone(); + min_index = i; + min_direction = 0; + } + }) + }); + } + } + + // if the min edge is in the voter's sub-chain. + // [target, ..., X, Y, ... voter] + let target_chunk = target_root_path.len() - common_count; + let min_chain_in_voter = (min_index + min_direction as usize) > target_chunk; + + // walk over the cycle and update the weights + let mut should_inc_counter = true; + let start_operation_add = ((min_index % 2) + min_direction as usize) % 2 == 1; + let mut additional_removed = Vec::new(); + for i in 0..cycle.len() { + let current = cycle[i].borrow(); + if current.id.role == NodeRole::Voter { + let prev = cycle[prev_index(i)].borrow(); + assignments + .iter_mut() + .enumerate() + .filter(|(_, a)| a.who == current.id.who) + .for_each(|(target_ass_index, ass)| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == prev.id.who) + .map(|idx| { + let next_value = if i % 2 == 0 { + if start_operation_add { + ass.distribution[idx].1.saturating_add(min_value) + } else { + ass.distribution[idx].1.saturating_sub(min_value) + } + } else { + if start_operation_add { + ass.distribution[idx].1.saturating_sub(min_value) + } else { + ass.distribution[idx].1.saturating_add(min_value) + } + }; + + if next_value.is_zero() { + // if the removed edge is from the current assignment, dis_index + // should NOT be increased. + if target_ass_index == assignment_index { + should_inc_counter = false + } + ass.distribution.remove(idx); + num_changed += 1; + // only add if this is not the min itself. 
+ if !(i == min_index && min_direction == 0) { + additional_removed.push(( + cycle[i].clone(), + cycle[prev_index(i)].clone(), + )); + } + } else { + ass.distribution[idx].1 = next_value; + } + }); + }); + + let next = cycle[next_index(i)].borrow(); + assignments + .iter_mut() + .enumerate() + .filter(|(_, a)| a.who == current.id.who) + .for_each(|(target_ass_index, ass)| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == next.id.who) + .map(|idx| { + let next_value = if i % 2 == 0 { + if start_operation_add { + ass.distribution[idx].1.saturating_sub(min_value) + } else { + ass.distribution[idx].1.saturating_add(min_value) + } + } else { + if start_operation_add { + ass.distribution[idx].1.saturating_add(min_value) + } else { + ass.distribution[idx].1.saturating_sub(min_value) + } + }; + + if next_value.is_zero() { + // if the removed edge is from the current assignment, dis_index + // should NOT be increased. + if target_ass_index == assignment_index { + should_inc_counter = false + } + ass.distribution.remove(idx); + num_changed += 1; + if !(i == min_index && min_direction == 1) { + additional_removed.push(( + cycle[i].clone(), + cycle[next_index(i)].clone(), + )); + } + } else { + ass.distribution[idx].1 = next_value; + } + }); + }); + } + } + + // don't do anything if the edge removed itself. This is always the first and last + // element + let should_reorg = !(min_index == (cycle.len() - 1) && min_direction == 1); + + // re-org. + if should_reorg { + let min_edge = vec![min_voter, min_target]; + if min_chain_in_voter { + // NOTE: safe; voter_root_path is always bigger than 1 element. + for i in 0..voter_root_path.len() - 1 { + let current = voter_root_path[i].clone().borrow().id.who.clone(); + let next = voter_root_path[i + 1].clone().borrow().id.who.clone(); + if min_edge.contains(¤t) && min_edge.contains(&next) { + break; + } + Node::set_parent_of(&voter_root_path[i + 1], &voter_root_path[i]); + } + Node::set_parent_of(&voter_node, &target_node); + } else { + // NOTE: safe; target_root_path is always bigger than 1 element. + for i in 0..target_root_path.len() - 1 { + let current = target_root_path[i].clone().borrow().id.who.clone(); + let next = target_root_path[i + 1].clone().borrow().id.who.clone(); + if min_edge.contains(¤t) && min_edge.contains(&next) { + break; + } + Node::set_parent_of(&target_root_path[i + 1], &target_root_path[i]); + } + Node::set_parent_of(&target_node, &voter_node); + } + } + + // remove every other node which has collapsed to zero + for (r1, r2) in additional_removed { + if Node::is_parent_of(&r1, &r2) { + Node::remove_parent(&r1); + } else if Node::is_parent_of(&r2, &r1) { + Node::remove_parent(&r2); + } + } + + // increment the counter if needed. + if should_inc_counter { + dist_index += 1; + } + } + } + } + + num_changed +} + +/// Reduce the given [`PhragmenResult`]. This removes redundant edges from without changing the +/// overall backing of any of the elected candidates. +/// +/// Returns the number of edges removed. +/// +/// IMPORTANT: It is strictly assumed that the `who` attribute of all provided assignments are +/// unique. The result will most likely be corrupt otherwise. Furthermore, if the _distribution +/// vector_ contains duplicate ids, only the first instance is ever updates. 
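The weight updates inside `reduce_4` and `reduce_all` all come down to the same move: pick the minimum edge on a cycle and push that amount around it with alternating signs, which zeroes at least one edge while keeping every voter's total and every target's support unchanged. Below is a standalone arithmetic sketch using the numbers from the `basic_reduce_4_cycle_works` test further down; the `cancel_circulation` helper is illustrative, not part of the crate.

```rust
// Cancel a circulation on a cycle: subtract the minimum on every other edge and add
// it on the edges in between, in cycle order.
fn cancel_circulation(cycle: &mut [(&'static str, u128)]) {
    // The minimum edge is the most we can push around the cycle.
    let min = cycle.iter().map(|(_, w)| *w).min().expect("cycle is non-empty");
    for (i, (_, w)) in cycle.iter_mut().enumerate() {
        // Alternate signs so each voter loses on one edge exactly what it gains on the other.
        if i % 2 == 0 { *w -= min } else { *w += min }
    }
}

fn main() {
    // Edges in cycle order: (1 -> 10), (2 -> 10), (2 -> 20), (1 -> 20);
    // voter 1 starts with {10: 25, 20: 75}, voter 2 with {10: 50, 20: 50}.
    let mut cycle = [("1->10", 25u128), ("2->10", 50), ("2->20", 50), ("1->20", 75)];
    cancel_circulation(&mut cycle);

    // (1 -> 10) drops to zero and can be removed; both voters still distribute 100 in
    // total, and targets 10 and 20 keep their overall support.
    assert_eq!(cycle, [("1->10", 0), ("2->10", 75), ("2->20", 25), ("1->20", 100)]);
}
```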
+/// +/// O(min{ |Ew| ⋅ k + m3 , |Ew| ⋅ m }) +pub fn reduce(assignments: &mut Vec>) -> u32 where { + let mut num_changed = reduce_4(assignments); + num_changed += reduce_all(assignments); + num_changed +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn merging_works() { + // D <-- A <-- B <-- C + // + // F <-- E + let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); + let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); + let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); + let c = Node::new(NodeId::from(4, NodeRole::Target)).into_ref(); + let e = Node::new(NodeId::from(5, NodeRole::Target)).into_ref(); + let f = Node::new(NodeId::from(6, NodeRole::Target)).into_ref(); + + Node::set_parent_of(&c, &b); + Node::set_parent_of(&b, &a); + Node::set_parent_of(&a, &d); + Node::set_parent_of(&e, &f); + + let path1 = vec![c.clone(), b.clone(), a.clone(), d.clone()]; + let path2 = vec![e.clone(), f.clone()]; + + merge(path1, path2); + // D <-- A <-- B <-- C + // | + // F --> E --> --> + assert_eq!(e.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c + } + + #[test] + fn merge_with_len_one() { + // D <-- A <-- B <-- C + // + // F <-- E + let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); + let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); + let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); + let c = Node::new(NodeId::from(4, NodeRole::Target)).into_ref(); + let f = Node::new(NodeId::from(6, NodeRole::Target)).into_ref(); + + Node::set_parent_of(&c, &b); + Node::set_parent_of(&b, &a); + Node::set_parent_of(&a, &d); + + let path1 = vec![c.clone(), b.clone(), a.clone(), d.clone()]; + let path2 = vec![f.clone()]; + + merge(path1, path2); + // D <-- A <-- B <-- C + // | + // F --> --> + assert_eq!(f.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c + } + + #[test] + fn basic_reduce_4_cycle_works() { + use super::*; + + let assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 25), (20, 75)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 50), (20, 50)], + }, + ]; + + let mut new_assignments = assignments.clone(); + let num_reduced = reduce_4(&mut new_assignments); + + assert_eq!(num_reduced, 1); + assert_eq!( + new_assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(20, 100),], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 75), (20, 25),], + }, + ], + ); + } + + #[test] + fn basic_reduce_all_cycles_works() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 15), (40, 15)], + }, + StakedAssignment { + who: 4, + distribution: vec![(20, 10), (30, 10), (40, 20)], + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 20), (30, 10), (40, 20)], + }, + ]; + + assert_eq!(3, reduce_all(&mut assignments)); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10),] + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5),], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 30),], + }, + StakedAssignment { + who: 4, + distribution: vec![(40, 40),] + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 15), (30, 20), (40, 15),], + }, + ], + ) + } + + #[test] + fn basic_reduce_works() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: 
vec![(10, 10)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 15), (40, 15)], + }, + StakedAssignment { + who: 4, + distribution: vec![(20, 10), (30, 10), (40, 20)], + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 20), (30, 10), (40, 20)], + }, + ]; + + assert_eq!(3, reduce(&mut assignments)); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10),] + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5),], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 30),], + }, + StakedAssignment { + who: 4, + distribution: vec![(40, 40),] + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 15), (30, 20), (40, 15),], + }, + ], + ) + } + + #[test] + fn should_deal_with_self_vote() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 15), (40, 15)], + }, + StakedAssignment { + who: 4, + distribution: vec![(20, 10), (30, 10), (40, 20)], + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 20), (30, 10), (40, 20)], + }, + // self vote from 10 and 20 to itself. + StakedAssignment { + who: 10, + distribution: vec![(10, 100)], + }, + StakedAssignment { + who: 20, + distribution: vec![(20, 200)], + }, + ]; + + assert_eq!(3, reduce(&mut assignments)); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10),] + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5),], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 30),], + }, + StakedAssignment { + who: 4, + distribution: vec![(40, 40),] + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 15), (30, 20), (40, 15),], + }, + // should stay untouched. 
+ StakedAssignment { + who: 10, + distribution: vec![(10, 100)] + }, + StakedAssignment { + who: 20, + distribution: vec![(20, 200)] + }, + ], + ) + } + + #[test] + fn reduce_3_common_votes_same_weight() { + let mut assignments = vec![ + StakedAssignment { + who: 4, + distribution: vec![ + ( + 1000000, + 100, + ), + ( + 1000002, + 100, + ), + ( + 1000004, + 100, + ), + ], + }, + StakedAssignment { + who: 5, + distribution: vec![ + ( + 1000000, + 100, + ), + ( + 1000002, + 100, + ), + ( + 1000004, + 100, + ), + ], + }, + ]; + + reduce_4(&mut assignments); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 4, + distribution: vec![(1000000, 200,), (1000004, 100,),], + }, + StakedAssignment { + who: 5, + distribution: vec![(1000002, 200,), (1000004, 100,),], + }, + ], + ) + } + + #[test] + #[should_panic] + fn reduce_panics_on_duplicate_voter() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10), (20, 10)], + }, + StakedAssignment { + who: 1, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 15)], + }, + ]; + + reduce(&mut assignments); + } + + #[test] + fn should_deal_with_duplicates_target() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 2, + distribution: vec![ + (10, 15), + (20, 15), + // duplicate + (10, 1), + // duplicate + (20, 1), + ], + }, + ]; + + reduce(&mut assignments); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 20),], + }, + StakedAssignment { + who: 2, + distribution: vec![ + (10, 10), + (20, 20), + // duplicate votes are silently ignored. + (10, 1), + (20, 1), + ], + }, + ], + ) + } + + #[test] + fn bound_should_be_kept() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(103, 72), (101, 53), (100, 83), (102, 38)], + }, + StakedAssignment { + who: 2, + distribution: vec![(103, 18), (101, 36), (102, 54), (100, 94)], + }, + StakedAssignment { + who: 3, + distribution: vec![(100, 96), (101, 35), (102, 52), (103, 69)], + }, + StakedAssignment { + who: 4, + distribution: vec![(102, 34), (100, 47), (103, 91), (101, 73)], + }, + ]; + + let winners = vec![103, 101, 100, 102]; + + let n = 4; + let m = winners.len() as u32; + let num_reduced = reduce_all(&mut assignments); + assert!(16 - num_reduced <= n + m); + } +} diff --git a/primitives/phragmen/src/tests.rs b/primitives/phragmen/src/tests.rs index 9027cc335fce73a6ea1a4b163ae1b6823ef4b914..e9861ede723cf9f31baf5f6c5d91b79e361b75d9 100644 --- a/primitives/phragmen/src/tests.rs +++ b/primitives/phragmen/src/tests.rs @@ -19,11 +19,12 @@ #![cfg(test)] use crate::mock::*; -use crate::{elect, PhragmenResult, PhragmenStakedAssignment, build_support_map, Support, equalize}; +use crate::{ + elect, equalize, build_support_map, is_score_better, + Support, StakedAssignment, Assignment, PhragmenResult, ExtendedBalance, +}; use substrate_test_utils::assert_eq_uvec; -use sp_runtime::Perbill; - -type Output = Perbill; +use sp_runtime::{Perbill, Permill, Percent, PerU16, traits::Convert}; #[test] fn float_phragmen_poc_works() { @@ -80,21 +81,33 @@ fn phragmen_poc_works() { (30, vec![2, 3]), ]; - let PhragmenResult { winners, assignments } = elect::<_, _, _, TestCurrencyToVote, Output>( + let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30)]); + let PhragmenResult { winners, assignments } = elect::<_, _, TestCurrencyToVote, Perbill>( 2, 2, candidates, 
- voters, - create_stake_of(&[(10, 10), (20, 20), (30, 30)]), + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); assert_eq_uvec!(winners, vec![(2, 40), (3, 50)]); assert_eq_uvec!( assignments, vec![ - (10, vec![(2, Perbill::from_percent(100))]), - (20, vec![(3, Perbill::from_percent(100))]), - (30, vec![(2, Perbill::from_percent(100/2)), (3, Perbill::from_percent(100/2))]), + Assignment { + who: 10u64, + distribution: vec![(2, Perbill::from_percent(100))], + }, + Assignment { + who: 20, + distribution: vec![(3, Perbill::from_percent(100))], + }, + Assignment { + who: 30, + distribution: vec![ + (2, Perbill::from_percent(100/2)), + (3, Perbill::from_percent(100/2)), + ], + }, ] ); } @@ -115,7 +128,10 @@ fn phragmen_poc_2_works() { (4, 500), ]); - run_and_compare(candidates, voters, stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates, voters, &stake_of, 2, 2); } #[test] @@ -133,7 +149,10 @@ fn phragmen_poc_3_works() { (4, 1000), ]); - run_and_compare(candidates, voters, stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates, voters, &stake_of, 2, 2); } #[test] @@ -149,17 +168,16 @@ fn phragmen_accuracy_on_large_scale_only_validators() { (5, (u64::max_value() - 2).into()), ]); - let PhragmenResult { winners, assignments } = elect::<_, _, _, TestCurrencyToVote, Output>( + let PhragmenResult { winners, assignments } = elect::<_, _, TestCurrencyToVote, Perbill>( 2, 2, candidates.clone(), - auto_generate_self_voters(&candidates), - stake_of, + auto_generate_self_voters(&candidates).iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); assert_eq_uvec!(winners, vec![(1, 18446744073709551614u128), (5, 18446744073709551613u128)]); assert_eq!(assignments.len(), 2); - check_assignments(assignments); + check_assignments_sum(assignments); } #[test] @@ -180,25 +198,36 @@ fn phragmen_accuracy_on_large_scale_validators_and_nominators() { (14, u64::max_value().into()), ]); - let PhragmenResult { winners, assignments } = elect::<_, _, _, TestCurrencyToVote, Output>( + let PhragmenResult { winners, assignments } = elect::<_, _, TestCurrencyToVote, Perbill>( 2, 2, candidates, - voters, - stake_of, + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); assert_eq_uvec!(winners, vec![(2, 36893488147419103226u128), (1, 36893488147419103219u128)]); assert_eq!( assignments, vec![ - (13, vec![(1, Perbill::one())]), - (14, vec![(2, Perbill::one())]), - (1, vec![(1, Perbill::one())]), - (2, vec![(2, Perbill::one())]), + Assignment { + who: 13u64, + distribution: vec![(1, Perbill::one())], + }, + Assignment { + who: 14, + distribution: vec![(2, Perbill::one())], + }, + Assignment { + who: 1, + distribution: vec![(1, Perbill::one())], + }, + Assignment { + who: 2, + distribution: vec![(2, Perbill::one())], + }, ] ); - check_assignments(assignments); + check_assignments_sum(assignments); } #[test] @@ -212,12 +241,11 @@ fn phragmen_accuracy_on_small_scale_self_vote() { (30, 1), ]); - let PhragmenResult { winners, assignments: _ } = elect::<_, _, _, 
TestCurrencyToVote, Output>( + let PhragmenResult { winners, assignments: _ } = elect::<_, _, TestCurrencyToVote, Perbill>( 3, 3, candidates, - voters, - stake_of, + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); @@ -243,12 +271,11 @@ fn phragmen_accuracy_on_small_scale_no_self_vote() { (3, 1), ]); - let PhragmenResult { winners, assignments: _ } = elect::<_, _, _, TestCurrencyToVote, Output>( + let PhragmenResult { winners, assignments: _ } = elect::<_, _, TestCurrencyToVote, Perbill>( 3, 3, candidates, - voters, - stake_of, + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); @@ -277,16 +304,15 @@ fn phragmen_large_scale_test() { (50, 990000000000000000), ]); - let PhragmenResult { winners, assignments } = elect::<_, _, _, TestCurrencyToVote, Output>( + let PhragmenResult { winners, assignments } = elect::<_, _, TestCurrencyToVote, Perbill>( 2, 2, candidates, - voters, - stake_of, + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); assert_eq_uvec!(winners, vec![(24, 1490000000000200000u128), (22, 1490000000000100000u128)]); - check_assignments(assignments); + check_assignments_sum(assignments); } #[test] @@ -304,24 +330,35 @@ fn phragmen_large_scale_test_2() { (50, nom_budget.into()), ]); - let PhragmenResult { winners, assignments } = elect::<_, _, _, TestCurrencyToVote, Output>( + let PhragmenResult { winners, assignments } = elect::<_, _, TestCurrencyToVote, Perbill>( 2, 2, candidates, - voters, - stake_of, + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); assert_eq_uvec!(winners, vec![(2, 1000000000004000000u128), (4, 1000000000004000000u128)]); assert_eq!( assignments, vec![ - (50, vec![(2, Perbill::from_parts(500000001)), (4, Perbill::from_parts(499999999))]), - (2, vec![(2, Perbill::one())]), - (4, vec![(4, Perbill::one())]), + Assignment { + who: 50u64, + distribution: vec![ + (2, Perbill::from_parts(500000001)), + (4, Perbill::from_parts(499999999)) + ], + }, + Assignment { + who: 2, + distribution: vec![(2, Perbill::one())], + }, + Assignment { + who: 4, + distribution: vec![(4, Perbill::one())], + }, ], ); - check_assignments(assignments); + check_assignments_sum(assignments); } #[test] @@ -354,7 +391,7 @@ fn phragmen_linear_equalize() { (130, 1000), ]); - run_and_compare(candidates, voters, stake_of, 2, 2); + run_and_compare::(candidates, voters, &stake_of, 2, 2); } #[test] @@ -369,12 +406,11 @@ fn elect_has_no_entry_barrier() { (2, 10), ]); - let PhragmenResult { winners, assignments: _ } = elect::<_, _, _, TestCurrencyToVote, Output>( + let PhragmenResult { winners, assignments: _ } = elect::<_, _, TestCurrencyToVote, Perbill>( 3, 3, candidates, - voters, - stake_of, + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); // 30 is elected with stake 0. The caller is responsible for stripping this. 
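Every call site above repeats the same conversion because `elect` now takes `(voter, stake, votes)` tuples instead of a voter list plus a `stake_of` closure. The snippet below is a hypothetical helper showing just that mapping, with plain integers standing in for the crate's account and balance types.

```rust
// Zip a stake lookup into the voter list to build the tuples the new `elect` expects.
fn with_stake(
    voters: Vec<(u64, Vec<u64>)>,
    stake_of: impl Fn(&u64) -> u64,
) -> Vec<(u64, u64, Vec<u64>)> {
    voters
        .into_iter()
        .map(|(who, votes)| {
            let stake = stake_of(&who);
            (who, stake, votes)
        })
        .collect()
}

fn main() {
    let voters = vec![(10u64, vec![1, 2]), (20, vec![2, 3])];
    let stake_of = |who: &u64| *who; // stub: stake equals the account id
    assert_eq!(
        with_stake(voters, stake_of),
        vec![(10, 10, vec![1, 2]), (20, 20, vec![2, 3])],
    );
}
```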
@@ -397,12 +433,11 @@ fn minimum_to_elect_is_respected() { (2, 10), ]); - let maybe_result = elect::<_, _, _, TestCurrencyToVote, Output>( + let maybe_result = elect::<_, _, TestCurrencyToVote, Perbill>( 10, 10, candidates, - voters, - stake_of, + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ); assert!(maybe_result.is_none()); @@ -424,39 +459,39 @@ fn self_votes_should_be_kept() { (1, 8), ]); - let result = elect::<_, _, _, TestCurrencyToVote, Output>( + let result = elect::<_, _, TestCurrencyToVote, Perbill>( 2, 2, candidates, - voters, - &stake_of, + voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), ).unwrap(); assert_eq!(result.winners, vec![(20, 28), (10, 18)]); assert_eq!( result.assignments, vec![ - (10, vec![(10, Perbill::from_percent(100))]), - (20, vec![(20, Perbill::from_percent(100))]), - (1, vec![ + Assignment { who: 10, distribution: vec![(10, Perbill::from_percent(100))] }, + Assignment { who: 20, distribution: vec![(20, Perbill::from_percent(100))] }, + Assignment { who: 1, distribution: vec![ (10, Perbill::from_percent(50)), (20, Perbill::from_percent(50)) ] - ) + }, ], ); - let mut supports = build_support_map::< - Balance, - AccountId, - _, - TestCurrencyToVote, - Output, - >( - &result.winners.into_iter().map(|(who, _)| who).collect(), - &result.assignments, - &stake_of - ); + let staked_assignments: Vec> = result.assignments + .into_iter() + .map(|a| { + let stake = >::convert(stake_of(&a.who)) as ExtendedBalance; + a.into_staked(stake, true) + }).collect(); + + let winners = result.winners.into_iter().map(|(who, _)| who).collect::>(); + let (mut supports, _) = build_support_map::( + winners.as_slice(), + &staked_assignments, + ); assert_eq!(supports.get(&5u64), None); assert_eq!( @@ -468,23 +503,13 @@ fn self_votes_should_be_kept() { &Support { total: 24u128, voters: vec![(20u64, 20u128), (1u64, 4u128)] }, ); - let assignments = result.assignments; - let mut staked_assignments - : Vec<(AccountId, Vec>)> - = Vec::with_capacity(assignments.len()); - for (n, assignment) in assignments.iter() { - let mut staked_assignment - : Vec> - = Vec::with_capacity(assignment.len()); - let stake = stake_of(&n); - for (c, per_thing) in assignment.iter() { - let vote_stake = *per_thing * stake; - staked_assignment.push((c.clone(), vote_stake)); - } - staked_assignments.push((n.clone(), staked_assignment)); - } - - equalize::(staked_assignments, &mut supports, 0, 2usize, &stake_of); + equalize::( + staked_assignments, + &mut supports, + 0, + 2usize, + &stake_of, + ); assert_eq!( supports.get(&10u64).unwrap(), @@ -495,3 +520,462 @@ fn self_votes_should_be_kept() { &Support { total: 20u128, voters: vec![(20u64, 20u128)] }, ); } + +#[test] +fn assignment_convert_works() { + let staked = StakedAssignment { + who: 1 as AccountId, + distribution: vec![ + (20, 100 as Balance), + (30, 25), + ], + }; + + let assignment = staked.clone().into_assignment(true); + assert_eq!( + assignment, + Assignment { + who: 1, + distribution: vec![ + (20, Perbill::from_percent(80)), + (30, Perbill::from_percent(20)), + ] + } + ); + + assert_eq!( + assignment.into_staked(125, true), + staked, + ); +} + +#[test] +fn score_comparison_is_lexicographical() { + // only better in the fist parameter, worse in the other two ✅ + assert_eq!( + is_score_better([10, 20, 30], [12, 10, 35]), + true, + ); + + // worse in the first, better in the other two ❌ + assert_eq!( + is_score_better([10, 20, 30], [9, 30, 10]), + false, + ); + + // equal in the 
first, the second one dictates. + assert_eq!( + is_score_better([10, 20, 30], [10, 25, 40]), + true, + ); + + // equal in the first two, the last one dictates. + assert_eq!( + is_score_better([10, 20, 30], [10, 20, 40]), + false, + ); +} + +mod compact { + use codec::{Decode, Encode}; + use crate::generate_compact_solution_type; + use super::{AccountId, Balance}; + // these need to come from the same dev-dependency `sp-phragmen`, not from the crate. + use sp_phragmen::{Assignment, StakedAssignment, Error as PhragmenError}; + use sp_std::{convert::{TryInto, TryFrom}, fmt::Debug}; + use sp_runtime::Percent; + + type Accuracy = Percent; + + generate_compact_solution_type!(TestCompact, 16); + + #[test] + fn compact_struct_is_codec() { + let compact = TestCompact::<_, _, _> { + votes1: vec![(2u64, 20), (4, 40)], + votes2: vec![ + (1, (10, Accuracy::from_percent(80)), 11), + (5, (50, Accuracy::from_percent(85)), 51), + ], + ..Default::default() + }; + + let encoded = compact.encode(); + + assert_eq!( + compact, + Decode::decode(&mut &encoded[..]).unwrap(), + ); + } + + fn basic_ratio_test_with() where + V: codec::Codec + Copy + Default + PartialEq + Eq + TryInto + TryFrom + From + Debug, + T: codec::Codec + Copy + Default + PartialEq + Eq + TryInto + TryFrom + From + Debug, + >::Error: std::fmt::Debug, + >::Error: std::fmt::Debug, + >::Error: std::fmt::Debug, + >::Error: std::fmt::Debug, + { + let voters = vec![ + 2 as AccountId, + 4, + 1, + 5, + 3, + ]; + let targets = vec![ + 10 as AccountId, + 11, + 20, // 2 + 30, + 31, // 4 + 32, + 40, // 6 + 50, + 51, // 8 + ]; + + let assignments = vec![ + Assignment { + who: 2 as AccountId, + distribution: vec![(20u64, Accuracy::from_percent(100))] + }, + Assignment { + who: 4, + distribution: vec![(40, Accuracy::from_percent(100))], + }, + Assignment { + who: 1, + distribution: vec![ + (10, Accuracy::from_percent(80)), + (11, Accuracy::from_percent(20)) + ], + }, + Assignment { + who: 5, + distribution: vec![ + (50, Accuracy::from_percent(85)), + (51, Accuracy::from_percent(15)), + ] + }, + Assignment { + who: 3, + distribution: vec![ + (30, Accuracy::from_percent(50)), + (31, Accuracy::from_percent(25)), + (32, Accuracy::from_percent(25)), + ], + }, + ]; + + let voter_index = |a: &AccountId| -> Option { + voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() + }; + let target_index = |a: &AccountId| -> Option { + targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() + }; + + let compacted = >::from_assignment( + assignments.clone(), + voter_index, + target_index, + ).unwrap(); + + assert_eq!( + compacted, + TestCompact { + votes1: vec![(V::from(0u8), T::from(2u8)), (V::from(1u8), T::from(6u8))], + votes2: vec![ + (V::from(2u8), (T::from(0u8), Accuracy::from_percent(80)), T::from(1u8)), + (V::from(3u8), (T::from(7u8), Accuracy::from_percent(85)), T::from(8u8)), + ], + votes3: vec![ + ( + V::from(4), + [(T::from(3u8), Accuracy::from_percent(50)), (T::from(4u8), Accuracy::from_percent(25))], + T::from(5u8), + ), + ], + ..Default::default() + } + ); + + let voter_at = |a: V| -> Option { voters.get(>::try_into(a).unwrap()).cloned() }; + let target_at = |a: T| -> Option { targets.get(>::try_into(a).unwrap()).cloned() }; + + assert_eq!( + compacted.into_assignment(voter_at, target_at).unwrap(), + assignments, + ); + } + + #[test] + fn basic_from_and_into_compact_works_assignments() { + basic_ratio_test_with::(); + basic_ratio_test_with::(); + basic_ratio_test_with::(); + } + + #[test] + fn 
basic_from_and_into_compact_works_staked_assignments() { + let voters = vec![ + 2 as AccountId, + 4, + 1, + 5, + 3, + ]; + let targets = vec![ + 10 as AccountId, 11, + 20, + 30, 31, 32, + 40, + 50, 51, + ]; + + let assignments = vec![ + StakedAssignment { + who: 2 as AccountId, + distribution: vec![(20, 100 as Balance)] + }, + StakedAssignment { + who: 4, + distribution: vec![(40, 100)], + }, + StakedAssignment { + who: 1, + distribution: vec![ + (10, 80), + (11, 20) + ], + }, + StakedAssignment { + who: 5, distribution: + vec![ + (50, 85), + (51, 15), + ] + }, + StakedAssignment { + who: 3, + distribution: vec![ + (30, 50), + (31, 25), + (32, 25), + ], + }, + ]; + + let voter_index = |a: &AccountId| -> Option { + voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() + }; + let target_index = |a: &AccountId| -> Option { + targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() + }; + + let compacted = >::from_staked( + assignments.clone(), + voter_index, + target_index, + ).unwrap(); + + assert_eq!( + compacted, + TestCompact { + votes1: vec![(0, 2), (1, 6)], + votes2: vec![ + (2, (0, 80), 1), + (3, (7, 85), 8), + ], + votes3: vec![ + (4, [(3, 50), (4, 25)], 5), + ], + ..Default::default() + } + ); + + let max_of_fn = |_: &AccountId| -> Balance { 100u128 }; + let voter_at = |a: u16| -> Option { voters.get(a as usize).cloned() }; + let target_at = |a: u16| -> Option { targets.get(a as usize).cloned() }; + + assert_eq!( + compacted.into_staked( + max_of_fn, + voter_at, + target_at, + ).unwrap(), + assignments, + ); + } + + #[test] + fn compact_into_stake_must_report_overflow() { + // The last edge which is computed from the rest should ALWAYS be positive. + // in votes2 + let compact = TestCompact:: { + votes1: Default::default(), + votes2: vec![(0, (1, 10), 2)], + ..Default::default() + }; + + let entity_at = |a: u16| -> Option { Some(a as AccountId) }; + let max_of = |_: &AccountId| -> Balance { 5 }; + + assert_eq!( + compact.into_staked(&max_of, &entity_at, &entity_at).unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + + // in votes3 onwards + let compact = TestCompact:: { + votes1: Default::default(), + votes2: Default::default(), + votes3: vec![(0, [(1, 7), (2, 8)], 3)], + ..Default::default() + }; + + assert_eq!( + compact.into_staked(&max_of, &entity_at, &entity_at).unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + + // Also if equal + let compact = TestCompact:: { + votes1: Default::default(), + votes2: Default::default(), + // 5 is total, we cannot leave none for 30 here. 
+ votes3: vec![(0, [(1, 3), (2, 2)], 3)], + ..Default::default() + }; + + assert_eq!( + compact.into_staked(&max_of, &entity_at, &entity_at).unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + } + + #[test] + fn compact_into_assignment_must_report_overflow() { + // in votes2 + let compact = TestCompact:: { + votes1: Default::default(), + votes2: vec![(0, (1, Accuracy::from_percent(100)), 2)], + ..Default::default() + }; + + let entity_at = |a: u16| -> Option { Some(a as AccountId) }; + + assert_eq!( + compact.into_assignment(&entity_at, &entity_at).unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + + // in votes3 onwards + let compact = TestCompact:: { + votes1: Default::default(), + votes2: Default::default(), + votes3: vec![(0, [(1, Accuracy::from_percent(70)), (2, Accuracy::from_percent(80))], 3)], + ..Default::default() + }; + + assert_eq!( + compact.into_assignment(&entity_at, &entity_at).unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + } + + #[test] + fn target_count_overflow_is_detected() { + let assignments = vec![ + StakedAssignment { + who: 1 as AccountId, + distribution: (10..26).map(|i| (i as AccountId, i as Balance)).collect::>(), + }, + ]; + + let entity_index = |a: &AccountId| -> Option { Some(*a as u16) }; + + let compacted = >::from_staked( + assignments.clone(), + entity_index, + entity_index, + ); + + assert!(compacted.is_ok()); + + let assignments = vec![ + StakedAssignment { + who: 1 as AccountId, + distribution: (10..27).map(|i| (i as AccountId, i as Balance)).collect::>(), + }, + ]; + + let compacted = >::from_staked( + assignments.clone(), + entity_index, + entity_index, + ); + + assert_eq!( + compacted.unwrap_err(), + PhragmenError::CompactTargetOverflow, + ); + + let assignments = vec![ + Assignment { + who: 1 as AccountId, + distribution: (10..27).map(|i| (i as AccountId, Percent::from_parts(i as u8))).collect::>(), + }, + ]; + + let compacted = >::from_assignment( + assignments.clone(), + entity_index, + entity_index, + ); + + assert_eq!( + compacted.unwrap_err(), + PhragmenError::CompactTargetOverflow, + ); + } + + #[test] + fn zero_target_count_is_ignored() { + let voters = vec![1 as AccountId, 2]; + let targets = vec![10 as AccountId, 11]; + + let assignments = vec![ + StakedAssignment { + who: 1 as AccountId, + distribution: vec![(10, 100 as Balance), (11, 100)] + }, + StakedAssignment { + who: 2, + distribution: vec![], + }, + ]; + + let voter_index = |a: &AccountId| -> Option { + voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() + }; + let target_index = |a: &AccountId| -> Option { + targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() + }; + + let compacted = >::from_staked( + assignments.clone(), + voter_index, + target_index, + ).unwrap(); + + assert_eq!( + compacted, + TestCompact { + votes1: Default::default(), + votes2: vec![(0, (0, 100), 1)], + ..Default::default() + } + ); + } +} diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index 13d91e71d34e47e51644b61fdac38d681b4beae4..a7975fb4754b9ee049b7f76f45ca0876a4b91006 100644 --- a/primitives/rpc/Cargo.toml +++ b/primitives/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-rpc" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,7 +10,10 @@ description = "Substrate RPC primitives and utilities." 
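The compact-solution overflow tests above hinge on one property of the encoding: only the first weights of a vote are stored explicitly and the final edge is recovered as the voter's total minus the stored sum, so it must come out strictly positive. Here is a standalone sketch of that recovery; names and error strings are illustrative and not the crate's API.

```rust
// Recover the implied weight of the last edge, rejecting the cases the tests above
// call a stake overflow: the stored weights exceed the total, or leave nothing over.
fn implied_last_weight(total: u128, stored: &[u128]) -> Result<u128, &'static str> {
    let mut remaining = total;
    for w in stored {
        // Any subtraction that would go below zero is already an overflow.
        remaining = remaining.checked_sub(*w).ok_or("stake overflow")?;
    }
    if remaining == 0 {
        // The reconstructed edge must carry some stake, otherwise the encoding is invalid.
        return Err("stake overflow");
    }
    Ok(remaining)
}

fn main() {
    // Fine: 100 total, 80 stored, the last edge gets the remaining 20.
    assert_eq!(implied_last_weight(100, &[80]), Ok(20));
    // Errors, mirroring the tests: a total of 5 cannot cover a stored weight of 10 ...
    assert!(implied_last_weight(5, &[10]).is_err());
    // ... and 3 + 2 consumes all of 5, leaving nothing for the last edge.
    assert!(implied_last_weight(5, &[3, 2]).is_err());
}
```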
[dependencies] serde = { version = "1.0.101", features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", path = "../core" } +sp-core = { version = "2.0.0-alpha.5", path = "../core" } [dev-dependencies] serde_json = "1.0.41" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index 70092c0587fe993e758e78dd74f1423ec204d803..01f7f2f19c98169dbebd86e841fd37473ea6d011 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,19 +10,19 @@ description = "Substrate runtime interface" documentation = "https://docs.rs/sp-runtime-interface/" [dependencies] -sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../wasm-interface", default-features = false } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-runtime-interface-proc-macro = { version = "2.0.0-alpha.2", path = "proc-macro" } -sp-externalities = { version = "0.8.0-alpha.2", optional = true, path = "../externalities" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-wasm-interface = { version = "2.0.0-alpha.5", path = "../wasm-interface", default-features = false } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-runtime-interface-proc-macro = { version = "2.0.0-alpha.5", path = "proc-macro" } +sp-externalities = { version = "0.8.0-alpha.5", optional = true, path = "../externalities" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } static_assertions = "1.0.0" -primitive-types = { version = "0.6.2", default-features = false } +primitive-types = { version = "0.7.0", default-features = false } [dev-dependencies] sp-runtime-interface-test-wasm = { version = "2.0.0-dev", path = "test-wasm" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } -sp-core = { version = "2.0.0-alpha.2", path = "../core" } -sp-io = { version = "2.0.0-alpha.2", path = "../io" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } +sp-core = { version = "2.0.0-alpha.5", path = "../core" } +sp-io = { version = "2.0.0-alpha.5", path = "../io" } rustversion = "1.0.0" trybuild = "1.0.23" @@ -43,3 +43,6 @@ std = [ # Disables static assertions in `impls.rs` that checks the word size. To prevent any footgun, the # check is changed into a runtime check. 
disable_target_static_assertions = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index 3743b2e09a153d5734250b131b56125040516194..6d0b7ee5fb7262c4a55922c0b1cef6549424c935 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime-interface-proc-macro" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -18,3 +18,6 @@ quote = "1.0.3" proc-macro2 = "1.0.3" Inflector = "0.11.4" proc-macro-crate = "0.1.4" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index bdddc5eba70e79f1ebf908acc3ec9f83bf6cceb1..e7c34fbf9934211df7ea874e08d2d4382b1c7fe2 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -30,7 +30,7 @@ use crate::utils::{ generate_crate_access, create_exchangeable_host_function_ident, get_function_arguments, - get_function_argument_names, get_trait_methods, + get_function_argument_names, get_runtime_interface, create_function_ident_with_version, }; use syn::{ @@ -47,19 +47,40 @@ use std::iter; /// of the trait method. pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { let trait_name = &trait_def.ident; - get_trait_methods(trait_def).try_fold(TokenStream::new(), |mut t, m| { - t.extend(function_for_method(trait_name, m, is_wasm_only)?); + let runtime_interface = get_runtime_interface(trait_def)?; + + // latest version dispatch + let token_stream: Result = runtime_interface.latest_versions() + .try_fold( + TokenStream::new(), + |mut t, (latest_version, method)| { + t.extend(function_for_method(method, latest_version, is_wasm_only)?); + Ok(t) + } + ); + + // earlier versions compatibility dispatch (only std variant) + let result: Result = runtime_interface.all_versions().try_fold(token_stream?, |mut t, (version, method)| + { + t.extend(function_std_impl(trait_name, method, version, is_wasm_only)?); Ok(t) - }) + }); + + result } /// Generates the bare function implementation for the given method for the host and wasm side. fn function_for_method( - trait_name: &Ident, method: &TraitItemMethod, + latest_version: u32, is_wasm_only: bool, ) -> Result { - let std_impl = function_std_impl(trait_name, method, is_wasm_only)?; + let std_impl = if !is_wasm_only { + function_std_latest_impl(method, latest_version)? + } else { + quote!() + }; + let no_std_impl = function_no_std_impl(method)?; Ok( @@ -78,7 +99,7 @@ fn function_no_std_impl(method: &TraitItemMethod) -> Result { let args = get_function_arguments(&method.sig); let arg_names = get_function_argument_names(&method.sig); let return_value = &method.sig.output; - let attrs = &method.attrs; + let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); Ok( quote! { @@ -92,13 +113,40 @@ fn function_no_std_impl(method: &TraitItemMethod) -> Result { ) } +/// Generate call to latest function version for `cfg((feature = "std")` +/// +/// This should generate simple `fn func(..) { func_version_(..) }`. 
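For illustration, a hand-written sketch of the shape this helper emits for a hypothetical method `fn call(data: &[u8]) -> Vec<u8>` whose latest version is 2; in the generated code the wrapper is additionally gated behind `#[cfg(feature = "std")]`, and `call_version_2` comes from `function_std_impl` rather than the stub used here:

```rust
// Stub standing in for the generated `call_version_2` host binding.
fn call_version_2(data: &[u8]) -> Vec<u8> {
    data.to_vec()
}

// Shape of the wrapper: the public, unversioned entry point only ever
// forwards to the latest version of the interface function.
pub fn call(data: &[u8]) -> Vec<u8> {
    call_version_2(data)
}

fn main() {
    assert_eq!(call(&[1, 2, 3]), vec![1, 2, 3]);
}
```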
+fn function_std_latest_impl( + method: &TraitItemMethod, + latest_version: u32, +) -> Result { + let function_name = &method.sig.ident; + let args = get_function_arguments(&method.sig).map(FnArg::Typed); + let arg_names = get_function_argument_names(&method.sig).collect::>(); + let return_value = &method.sig.output; + let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); + let latest_function_name = create_function_ident_with_version(&method.sig.ident, latest_version); + + Ok(quote_spanned! { method.span() => + #[cfg(feature = "std")] + #( #attrs )* + pub fn #function_name( #( #args, )* ) #return_value { + #latest_function_name( + #( #arg_names, )* + ) + } + }) +} + /// Generates the bare function implementation for `cfg(feature = "std")`. fn function_std_impl( trait_name: &Ident, method: &TraitItemMethod, + version: u32, is_wasm_only: bool, ) -> Result { - let function_name = &method.sig.ident; + let function_name = create_function_ident_with_version(&method.sig.ident, version); + let crate_ = generate_crate_access(); let args = get_function_arguments(&method.sig).map(FnArg::Typed).chain( // Add the function context as last parameter when this is a wasm only interface. @@ -115,16 +163,15 @@ fn function_std_impl( ).take(1), ); let return_value = &method.sig.output; - let attrs = &method.attrs; + let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); // Don't make the function public accessible when this is a wasm only interface. - let vis = if is_wasm_only { quote!() } else { quote!(pub) }; - let call_to_trait = generate_call_to_trait(trait_name, method, is_wasm_only); + let call_to_trait = generate_call_to_trait(trait_name, method, version, is_wasm_only); Ok( quote_spanned! { method.span() => #[cfg(feature = "std")] #( #attrs )* - #vis fn #function_name( #( #args, )* ) #return_value { + fn #function_name( #( #args, )* ) #return_value { #call_to_trait } } @@ -135,10 +182,11 @@ fn function_std_impl( fn generate_call_to_trait( trait_name: &Ident, method: &TraitItemMethod, + version: u32, is_wasm_only: bool, ) -> TokenStream { let crate_ = generate_crate_access(); - let method_name = &method.sig.ident; + let method_name = create_function_ident_with_version(&method.sig.ident, version); let expect_msg = format!( "`{}` called outside of an Externalities-provided environment.", method_name, diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index 42bfe3c712b9eabf2caefef70f252aaf3f24ce18..205ee87105c41c9a1d5166f2a6b182ae28dd308b 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -23,13 +23,13 @@ use crate::utils::{ generate_crate_access, create_host_function_ident, get_function_argument_names, get_function_argument_types_without_ref, get_function_argument_types_ref_and_mut, - get_function_argument_names_and_types_without_ref, get_trait_methods, get_function_arguments, - get_function_argument_types, create_exchangeable_host_function_ident, + get_function_argument_names_and_types_without_ref, get_function_arguments, + get_function_argument_types, create_exchangeable_host_function_ident, get_runtime_interface, + create_function_ident_with_version, }; use syn::{ - ItemTrait, TraitItemMethod, Result, ReturnType, Ident, TraitItem, Pat, Error, Signature, - spanned::Spanned, + 
ItemTrait, TraitItemMethod, Result, ReturnType, Ident, Pat, Error, Signature, spanned::Spanned, }; use proc_macro2::{TokenStream, Span}; @@ -44,13 +44,15 @@ use std::iter::{Iterator, self}; /// implementations for the host functions on the host. pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { let trait_name = &trait_def.ident; - let extern_host_function_impls = get_trait_methods(trait_def) - .try_fold(TokenStream::new(), |mut t, m| { - t.extend(generate_extern_host_function(m, trait_name)?); + let extern_host_function_impls = get_runtime_interface(trait_def)? + .latest_versions() + .try_fold(TokenStream::new(), |mut t, (version, method)| { + t.extend(generate_extern_host_function(method, version, trait_name)?); Ok::<_, Error>(t) })?; - let exchangeable_host_functions = get_trait_methods(trait_def) - .try_fold(TokenStream::new(), |mut t, m| { + let exchangeable_host_functions = get_runtime_interface(trait_def)? + .latest_versions() + .try_fold(TokenStream::new(), |mut t, (_, m)| { t.extend(generate_exchangeable_host_function(m)?); Ok::<_, Error>(t) })?; @@ -76,7 +78,7 @@ pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result Result { +fn generate_extern_host_function(method: &TraitItemMethod, version: u32, trait_name: &Ident) -> Result { let crate_ = generate_crate_access(); let args = get_function_arguments(&method.sig); let arg_types = get_function_argument_types_without_ref(&method.sig); @@ -85,7 +87,7 @@ fn generate_extern_host_function(method: &TraitItemMethod, trait_name: &Ident) - let arg_names2 = get_function_argument_names(&method.sig); let arg_names3 = get_function_argument_names(&method.sig); let function = &method.sig.ident; - let ext_function = create_host_function_ident(&method.sig.ident, trait_name); + let ext_function = create_host_function_ident(&method.sig.ident, version, trait_name); let doc_string = format!( " Default extern host function implementation for [`super::{}`].", method.sig.ident, @@ -157,14 +159,12 @@ fn generate_exchangeable_host_function(method: &TraitItemMethod) -> Result Result { let crate_ = generate_crate_access(); - let host_functions = trait_def - .items - .iter() - .filter_map(|i| match i { - TraitItem::Method(ref method) => Some(method), - _ => None, - }) - .map(|m| generate_host_function_implementation(&trait_def.ident, m, is_wasm_only)) + + let host_functions = get_runtime_interface(trait_def)? 
+ .all_versions() + .map(|(version, method)| + generate_host_function_implementation(&trait_def.ident, method, version, is_wasm_only) + ) .collect::>>()?; Ok( @@ -191,9 +191,10 @@ fn generate_host_functions_struct(trait_def: &ItemTrait, is_wasm_only: bool) -> fn generate_host_function_implementation( trait_name: &Ident, method: &TraitItemMethod, + version: u32, is_wasm_only: bool, ) -> Result { - let name = create_host_function_ident(&method.sig.ident, trait_name).to_string(); + let name = create_host_function_ident(&method.sig.ident, version, trait_name).to_string(); let struct_name = Ident::new(&name.to_pascal_case(), Span::call_site()); let crate_ = generate_crate_access(); let signature = generate_wasm_interface_signature_for_host_function(&method.sig)?; @@ -202,7 +203,7 @@ fn generate_host_function_implementation( trait_name, ).collect::>>()?; let ffi_to_host_values = generate_ffi_to_host_value(&method.sig).collect::>>()?; - let host_function_call = generate_host_function_call(&method.sig, is_wasm_only); + let host_function_call = generate_host_function_call(&method.sig, version, is_wasm_only); let into_preallocated_ffi_value = generate_into_preallocated_ffi_value(&method.sig)?; let convert_return_value = generate_return_value_into_wasm_value(&method.sig); @@ -211,7 +212,6 @@ fn generate_host_function_implementation( { struct #struct_name; - #[allow(unused)] impl #crate_::sp_wasm_interface::Function for #struct_name { fn name(&self) -> &str { #name @@ -322,8 +322,8 @@ fn generate_ffi_to_host_value<'a>( } /// Generate the code to call the host function and the ident that stores the result. -fn generate_host_function_call(sig: &Signature, is_wasm_only: bool) -> TokenStream { - let host_function_name = &sig.ident; +fn generate_host_function_call(sig: &Signature, version: u32, is_wasm_only: bool) -> TokenStream { + let host_function_name = create_function_ident_with_version(&sig.ident, version); let result_var_name = generate_host_function_result_var_name(&sig.ident); let ref_and_mut = get_function_argument_types_ref_and_mut(sig).map(|ram| ram.map(|(vr, vm)| quote!(#vr #vm)) diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs index af71ba324172157be76ee094b6682fe880bc1b2d..542c4ca4b8c37b3519a4bb103897c80d83145b4d 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs @@ -17,10 +17,15 @@ //! Checks the trait declaration, makes the trait declaration module local, removes all method //! default implementations and implements the trait for `&mut dyn Externalities`. -use crate::utils::{generate_crate_access, get_function_argument_types_without_ref}; +use crate::utils::{ + generate_crate_access, + get_function_argument_types_without_ref, + get_runtime_interface, + create_function_ident_with_version, +}; use syn::{ - ItemTrait, TraitItemMethod, Result, TraitItem, Error, fold::{self, Fold}, spanned::Spanned, + ItemTrait, TraitItemMethod, Result, Error, fold::{self, Fold}, spanned::Spanned, Visibility, Receiver, Type, Generics, }; @@ -32,7 +37,7 @@ use quote::quote; /// essential definition and implement this essential definition for `dyn Externalities`. 
pub fn process(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { let impl_trait = impl_trait_for_externalities(trait_def, is_wasm_only)?; - let essential_trait_def = ToEssentialTraitDef::convert(trait_def.clone())?; + let essential_trait_def = declare_essential_trait(trait_def)?; Ok( quote! { @@ -48,29 +53,35 @@ pub fn process(trait_def: &ItemTrait, is_wasm_only: bool) -> Result struct ToEssentialTraitDef { /// All errors found while doing the conversion. errors: Vec, + methods: Vec, } impl ToEssentialTraitDef { - /// Convert the given trait definition to the essential trait definition. - fn convert(trait_def: ItemTrait) -> Result { - let mut folder = ToEssentialTraitDef { - errors: Vec::new(), - }; - - let res = folder.fold_item_trait(trait_def); + fn new() -> Self { + ToEssentialTraitDef { errors: vec![], methods: vec![] } + } - if let Some(first_error) = folder.errors.pop() { + fn into_methods(self) -> Result> { + let mut errors = self.errors; + let methods = self.methods; + if let Some(first_error) = errors.pop() { Err( - folder.errors.into_iter().fold(first_error, |mut o, n| { + errors.into_iter().fold(first_error, |mut o, n| { o.combine(n); o }) ) } else { - Ok(res) + Ok(methods) } } + fn process(&mut self, method: &TraitItemMethod, version: u32) { + let mut folded = self.fold_trait_item_method(method.clone()); + folded.sig.ident = create_function_ident_with_version(&folded.sig.ident, version); + self.methods.push(folded); + } + fn push_error(&mut self, span: &S, msg: &str) { self.errors.push(Error::new(span.span(), msg)); } @@ -98,6 +109,8 @@ impl Fold for ToEssentialTraitDef { self.error_on_generic_parameters(&method.sig.generics); + method.attrs.retain(|a| !a.path.is_ident("version")); + fold::fold_trait_item_method(self, method) } @@ -117,17 +130,40 @@ impl Fold for ToEssentialTraitDef { } } +fn declare_essential_trait(trait_def: &ItemTrait) -> Result { + let trait_ = &trait_def.ident; + + if let Some(param) = trait_def.generics.params.first() { + return Err(Error::new(param.span(), "Generic parameters not supported.")) + } + + let interface = get_runtime_interface(trait_def)?; + let mut folder = ToEssentialTraitDef::new(); + for (version, interface_method) in interface.all_versions() { + folder.process(interface_method, version); + } + let methods = folder.into_methods()?; + + Ok( + quote! { + trait #trait_ { + #( #methods )* + } + } + ) +} + /// Implements the given trait definition for `dyn Externalities`. 
fn impl_trait_for_externalities(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { let trait_ = &trait_def.ident; let crate_ = generate_crate_access(); - let methods = trait_def - .items - .iter() - .filter_map(|i| match i { - TraitItem::Method(ref method) => Some(method), - _ => None, - }); + let interface = get_runtime_interface(trait_def)?; + let methods = interface.all_versions().map(|(version, method)| { + let mut cloned = method.clone(); + cloned.attrs.retain(|a| !a.path.is_ident("version")); + cloned.sig.ident = create_function_ident_with_version(&cloned.sig.ident, version); + cloned + }); let impl_type = if is_wasm_only { quote!( &mut dyn #crate_::sp_wasm_interface::FunctionContext ) diff --git a/primitives/runtime-interface/proc-macro/src/utils.rs b/primitives/runtime-interface/proc-macro/src/utils.rs index 15c65f11caf2932bb1c77d51233164f69045b58f..45f66e3bf6525569d9ed6c5f9209e350bbe97cd2 100644 --- a/primitives/runtime-interface/proc-macro/src/utils.rs +++ b/primitives/runtime-interface/proc-macro/src/utils.rs @@ -20,17 +20,60 @@ use proc_macro2::{TokenStream, Span}; use syn::{ Ident, Error, Signature, Pat, PatType, FnArg, Type, token, TraitItemMethod, ItemTrait, - TraitItem, parse_quote, spanned::Spanned, + TraitItem, parse_quote, spanned::Spanned, Result, Meta, NestedMeta, Lit, Attribute, }; use proc_macro_crate::crate_name; use std::env; +use std::collections::{BTreeMap, btree_map::Entry}; use quote::quote; use inflector::Inflector; +/// Runtime interface function with all associated versions of this function. +pub struct RuntimeInterfaceFunction<'a> { + latest_version: u32, + versions: BTreeMap, +} + +impl<'a> RuntimeInterfaceFunction<'a> { + fn new(version: u32, trait_item: &'a TraitItemMethod) -> Self { + Self { + latest_version: version, + versions: { + let mut res = BTreeMap::new(); + res.insert(version, trait_item); + res + }, + } + } + + pub fn latest_version(&self) -> (u32, &TraitItemMethod) { + ( + self.latest_version, + self.versions.get(&self.latest_version) + .expect("If latest_version has a value, the key with this value is in the versions; qed") + ) + } +} + +/// All functions of a runtime interface grouped by the function names. +pub struct RuntimeInterface<'a> { + items: BTreeMap>, +} + +impl<'a> RuntimeInterface<'a> { + pub fn latest_versions(&self) -> impl Iterator { + self.items.iter().map(|(_, item)| item.latest_version()) + } + + pub fn all_versions(&self) -> impl Iterator { + self.items.iter().flat_map(|(_, item)| item.versions.iter()).map(|(v, i)| (*v, *i)) + } + } + /// Generates the include for the runtime-interface crate. pub fn generate_runtime_interface_include() -> TokenStream { if env::var("CARGO_PKG_NAME").unwrap() == "sp-runtime-interface" { @@ -67,12 +110,25 @@ pub fn create_exchangeable_host_function_ident(name: &Ident) -> Ident { } /// Create the host function identifier for the given function name. -pub fn create_host_function_ident(name: &Ident, trait_name: &Ident) -> Ident { +pub fn create_host_function_ident(name: &Ident, version: u32, trait_name: &Ident) -> Ident { Ident::new( &format!( - "ext_{}_{}_version_1", + "ext_{}_{}_version_{}", trait_name.to_string().to_snake_case(), name, + version, + ), + Span::call_site(), + ) +} + +/// Create the host function identifier for the given function name. 
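A small, stand-alone illustration of the export name this scheme produces (the helper below simply reproduces the `format!` call with a pre-snake_cased trait name; the concrete names are taken from the test interface added later in this diff):

```rust
// Reproduces the `ext_{trait}_{function}_version_{n}` naming used by
// `create_host_function_ident`, assuming the trait name is already snake_cased.
fn host_function_export(trait_snake_case: &str, method: &str, version: u32) -> String {
    format!("ext_{}_{}_version_{}", trait_snake_case, method, version)
}

fn main() {
    // The export a runtime would import for version 2 of `TestApi::test_versionning`.
    assert_eq!(
        host_function_export("test_api", "test_versionning", 2),
        "ext_test_api_test_versionning_version_2",
    );
}
```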
+pub fn create_function_ident_with_version(name: &Ident, version: u32) -> Ident { + Ident::new( + &format!( + "{}_version_{}", + name, + version, ), Span::call_site(), ) @@ -151,7 +207,7 @@ pub fn get_function_argument_types_ref_and_mut<'a>( } /// Returns an iterator over all trait methods for the given trait definition. -pub fn get_trait_methods<'a>(trait_def: &'a ItemTrait) -> impl Iterator { +fn get_trait_methods<'a>(trait_def: &'a ItemTrait) -> impl Iterator { trait_def .items .iter() @@ -160,3 +216,85 @@ pub fn get_trait_methods<'a>(trait_def: &'a ItemTrait) -> impl Iterator None, }) } + +/// Parse version attribute. +/// +/// Returns error if it is in incorrent format. Correct format is only `#[version(X)]`. +fn parse_version_attribute(version: &Attribute) -> Result { + let meta = version.parse_meta()?; + + let err = Err(Error::new( + meta.span(), + "Unexpected `version` attribute. The supported format is `#[version(1)]`", + ) + ); + + match meta { + Meta::List(list) => { + if list.nested.len() != 1 { + err + } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { + i.base10_parse() + } else { + err + } + }, + _ => err, + } +} + +/// Return item version (`#[version(X)]`) attribute, if present. +fn get_item_version(item: &TraitItemMethod) -> Result> { + item.attrs.iter().find(|attr| attr.path.is_ident("version")) + .map(|attr| parse_version_attribute(attr)) + .transpose() +} + +/// Returns all runtime interface members, with versions. +pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) + -> Result> +{ + let mut functions: BTreeMap> = BTreeMap::new(); + + for item in get_trait_methods(trait_def) { + let name = item.sig.ident.clone(); + let version = get_item_version(item)?.unwrap_or(1); + + match functions.entry(name.clone()) { + Entry::Vacant(entry) => { entry.insert(RuntimeInterfaceFunction::new(version, item)); }, + Entry::Occupied(mut entry) => { + if let Some(existing_item) = entry.get().versions.get(&version) { + let mut err = Error::new( + item.span(), + "Duplicated version attribute", + ); + err.combine(Error::new( + existing_item.span(), + "Previous version with the same number defined here", + )); + + return Err(err); + } + + let interface_item = entry.get_mut(); + if interface_item.latest_version < version { interface_item.latest_version = version; } + interface_item.versions.insert(version, item); + } + } + } + + for function in functions.values() { + let mut next_expected = 1; + for (version, item) in function.versions.iter() { + if next_expected != *version { + return Err(Error::new( + item.span(), + format!("Unexpected version attribute: missing version '{}' for this function", next_expected), + )); + } + next_expected += 1; + } + } + + Ok(RuntimeInterface { items: functions }) +} \ No newline at end of file diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index 609f4f600b784ecdd8f30ca885256ad310f11e33..fd158d4b8aa9609df1206b2ce5a640e95b5e3092 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -129,11 +129,22 @@ pub use sp_std; /// /// A function that can be called from native/wasm. /// /// /// /// The implementation given to this function is only compiled on native. -/// fn call_some_complex_code(data: &[u8]) -> Vec { +/// fn call(data: &[u8]) -> Vec { /// // Here you could call some rather complex code that only compiles on native or /// // is way faster in native than executing it in wasm. 
/// Vec::new() /// } +/// /// Call function, but different version. +/// /// +/// /// For new runtimes, only function with latest version is reachable. +/// /// But old version (above) is still accessible for old runtimes. +/// /// Default version is 1. +/// #[version(2)] +/// fn call(data: &[u8]) -> Vec { +/// // Here you could call some rather complex code that only compiles on native or +/// // is way faster in native than executing it in wasm. +/// [17].to_vec() +/// } /// /// /// A function can take a `&self` or `&mut self` argument to get access to the /// /// `Externalities`. (The generated method does not require @@ -157,13 +168,15 @@ pub use sp_std; /// // on the visibility of the trait declaration. /// mod interface { /// trait Interface { -/// fn call_some_complex_code(data: &[u8]) -> Vec; -/// fn set_or_clear(&mut self, optional: Option>); +/// fn call_version_1(data: &[u8]) -> Vec; +/// fn call_version_2(data: &[u8]) -> Vec; +/// fn set_or_clear_version_1(&mut self, optional: Option>); /// } /// /// impl Interface for &mut dyn sp_externalities::Externalities { -/// fn call_some_complex_code(data: &[u8]) -> Vec { Vec::new() } -/// fn set_or_clear(&mut self, optional: Option>) { +/// fn call_version_1(data: &[u8]) -> Vec { Vec::new() } +/// fn call_version_2(data: &[u8]) -> Vec { [17].to_vec() } +/// fn set_or_clear_version_1(&mut self, optional: Option>) { /// match optional { /// Some(value) => self.set_storage([1, 2, 3, 4].to_vec(), value), /// None => self.clear_storage(&[1, 2, 3, 4]), @@ -171,12 +184,25 @@ pub use sp_std; /// } /// } /// -/// pub fn call_some_complex_code(data: &[u8]) -> Vec { -/// <&mut dyn sp_externalities::Externalities as Interface>::call_some_complex_code(data) +/// pub fn call(data: &[u8]) -> Vec { +/// // only latest version is exposed +/// call_version_2(data) +/// } +/// +/// fn call_version_1(data: &[u8]) -> Vec { +/// <&mut dyn sp_externalities::Externalities as Interface>::call_version_1(data) +/// } +/// +/// fn call_version_2(data: &[u8]) -> Vec { +/// <&mut dyn sp_externalities::Externalities as Interface>::call_version_2(data) /// } /// /// pub fn set_or_clear(optional: Option>) { -/// sp_externalities::with_externalities(|mut ext| Interface::set_or_clear(&mut ext, optional)) +/// set_or_clear_version_1(optional) +/// } +/// +/// fn set_or_clear_version_1(optional: Option>) { +/// sp_externalities::with_externalities(|mut ext| Interface::set_or_clear_version_1(&mut ext, optional)) /// .expect("`set_or_clear` called outside of an Externalities-provided environment.") /// } /// @@ -205,7 +231,7 @@ pub use sp_std; /// /// `::FFIType`. /// /// /// /// `data` holds the pointer and the length to the `[u8]` slice. -/// pub fn ext_Interface_call_some_complex_code_version_1(data: u64) -> u64; +/// pub fn ext_Interface_call_version_1(data: u64) -> u64; /// /// `optional` holds the pointer and the length of the encoded value. /// pub fn ext_Interface_set_or_clear_version_1(optional: u64); /// } @@ -213,18 +239,18 @@ pub use sp_std; /// /// /// The type is actually `ExchangeableFunction` (from `sp-runtime-interface`). /// /// -/// /// This can be used to replace the implementation of the `call_some_complex_code` function. +/// /// This can be used to replace the implementation of the `call` function. /// /// Instead of calling into the host, the callee will automatically call the other /// /// implementation. 
/// /// /// /// To replace the implementation: /// /// -/// /// `host_call_some_complex_code.replace_implementation(some_other_impl)` -/// pub static host_call_some_complex_code: () = (); +/// /// `host_call.replace_implementation(some_other_impl)` +/// pub static host_call: () = (); /// pub static host_set_or_clear: () = (); /// -/// pub fn call_some_complex_code(data: &[u8]) -> Vec { -/// // This is the actual call: `host_call_some_complex_code.get()(data)` +/// pub fn call(data: &[u8]) -> Vec { +/// // This is the actual call: `host_call.get()(data)` /// // /// // But that does not work for several reasons in this example, so we just return an /// // empty vector. diff --git a/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..6f2d66bd77c4b2d672c54d3dc6f760830ccc570b --- /dev/null +++ b/primitives/runtime-interface/test-wasm-deprecated/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "sp-runtime-interface-test-wasm-deprecated" +version = "2.0.0-dev" +authors = ["Parity Technologies "] +edition = "2018" +build = "build.rs" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +publish = false + +[dependencies] +sp-runtime-interface = { version = "2.0.0-alpha.5", default-features = false, path = "../" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../io" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../core" } + +[build-dependencies] +wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } + +[features] +default = [ "std" ] +std = [ "sp-runtime-interface/std", "sp-std/std", "sp-core/std", "sp-io/std" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/runtime-interface/test-wasm-deprecated/build.rs b/primitives/runtime-interface/test-wasm-deprecated/build.rs new file mode 100644 index 0000000000000000000000000000000000000000..647b4768141d2203155bf5325f9ff87f7d9c2446 --- /dev/null +++ b/primitives/runtime-interface/test-wasm-deprecated/build.rs @@ -0,0 +1,26 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +use wasm_builder_runner::WasmBuilder; + +fn main() { + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates_or_path("1.0.9", "../../../utils/wasm-builder") + .export_heap_base() + .import_memory() + .build() +} diff --git a/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..29d28c75faafb9b4864479568c819e9bc3178477 --- /dev/null +++ b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs @@ -0,0 +1,52 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Tests for the runtime interface traits and proc macros. + +#![cfg_attr(not(feature = "std"), no_std)] + +use sp_core::wasm_export_functions; +use sp_runtime_interface::runtime_interface; + +// Include the WASM binary +#[cfg(feature = "std")] +include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); + +/// This function is not used, but we require it for the compiler to include `sp-io`. +/// `sp-io` is required for its panic and oom handler. +#[no_mangle] +pub fn import_sp_io() { + sp_io::misc::print_utf8(&[]); +} + +#[runtime_interface] +pub trait TestApi { + fn test_versionning(&self, _data: u32) -> bool { + // should not be called + unimplemented!() + } +} + +wasm_export_functions! 
{ + fn test_versionning_works() { + // old api allows only 42 and 50 + assert!(test_api::test_versionning(42)); + assert!(test_api::test_versionning(50)); + + assert!(!test_api::test_versionning(142)); + assert!(!test_api::test_versionning(0)); + } +} \ No newline at end of file diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index b322e529d66735ea9e0418d0a3c2cf932c17eff3..4eb4f01c9f0c810ab42f9016e1f7db66147e1f65 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -10,10 +10,10 @@ repository = "https://github.com/paritytech/substrate/" publish = false [dependencies] -sp-runtime-interface = { version = "2.0.0-alpha.2", default-features = false, path = "../" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "2.0.0-alpha.5", default-features = false, path = "../" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../io" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../core" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } @@ -21,3 +21,6 @@ wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-run [features] default = [ "std" ] std = [ "sp-runtime-interface/std", "sp-std/std", "sp-core/std", "sp-io/std" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index ee7120b1b8cb59e17e2e524c86233afafda2c6f7..700c77854a8b3e003c56c1b25d2c0ba904550fd1 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -21,7 +21,7 @@ use sp_runtime_interface::runtime_interface; #[cfg(not(feature = "std"))] -use sp_std::{vec, vec::Vec, mem, convert::TryFrom}; +use sp_std::{prelude::*, mem, convert::TryFrom}; use sp_core::{sr25519::Public, wasm_export_functions}; @@ -103,6 +103,15 @@ pub trait TestApi { fn get_and_return_i128(val: i128) -> i128 { val } + + fn test_versionning(&self, data: u32) -> bool { + data == 42 || data == 50 + } + + #[version(2)] + fn test_versionning(&self, data: u32) -> bool { + data == 42 + } } /// This function is not used, but we require it for the compiler to include `sp-io`. @@ -231,4 +240,14 @@ wasm_export_functions! { } assert_eq!(0, len); } + + fn test_versionning_works() { + // we fix new api to accept only 42 as a proper input + // as opposed to sp-runtime-interface-test-wasm-deprecated::test_api::verify_input + // which accepted 42 and 50. 
+ assert!(test_api::test_versionning(42)); + + assert!(!test_api::test_versionning(50)); + assert!(!test_api::test_versionning(102)); + } } diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index 53c05b68b3df285041dad7de43c4236c75566fb1..f3bee038c8204c612444c8c3d6e34a4e7f3e2126 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -9,9 +9,13 @@ homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../" } -sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor" } +sp-runtime-interface = { version = "2.0.0-alpha.5", path = "../" } +sc-executor = { version = "0.8.0-alpha.5", path = "../../../client/executor" } sp-runtime-interface-test-wasm = { version = "2.0.0-dev", path = "../test-wasm" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime" } -sp-io = { version = "2.0.0-alpha.2", path = "../../io" } +sp-runtime-interface-test-wasm-deprecated = { version = "2.0.0-dev", path = "../test-wasm-deprecated" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../../primitives/state-machine" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../runtime" } +sp-io = { version = "2.0.0-alpha.5", path = "../../io" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 5236bf147e0f085e0d232648d8c480b62fa9f1bc..110eda980fd6947166768150e55c464c31c9ac80 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -20,13 +20,16 @@ #![cfg(test)] use sp_runtime_interface::*; + use sp_runtime_interface_test_wasm::{WASM_BINARY, test_api::HostFunctions}; +use sp_runtime_interface_test_wasm_deprecated::WASM_BINARY as WASM_BINARY_DEPRECATED; + use sp_wasm_interface::HostFunctions as HostFunctionsT; use sc_executor::CallInWasm; type TestExternalities = sp_state_machine::TestExternalities; -fn call_wasm_method(method: &str) -> TestExternalities { +fn call_wasm_method(binary: &[u8], method: &str) -> TestExternalities { let mut ext = TestExternalities::default(); let mut ext_ext = ext.ext(); let mut host_functions = HF::host_functions(); @@ -40,7 +43,7 @@ fn call_wasm_method(method: &str) -> TestExternalities { 8, ); executor.call_in_wasm( - &WASM_BINARY[..], + binary, None, method, &[], @@ -52,17 +55,17 @@ fn call_wasm_method(method: &str) -> TestExternalities { #[test] fn test_return_data() { - call_wasm_method::("test_return_data"); + call_wasm_method::(&WASM_BINARY[..], "test_return_data"); } #[test] fn test_return_option_data() { - call_wasm_method::("test_return_option_data"); + call_wasm_method::(&WASM_BINARY[..], "test_return_option_data"); } #[test] fn test_set_storage() { - let mut ext = call_wasm_method::("test_set_storage"); + let mut ext = call_wasm_method::(&WASM_BINARY[..], "test_set_storage"); let expected = "world"; assert_eq!(expected.as_bytes(), &ext.ext().storage("hello".as_bytes()).unwrap()[..]); @@ -70,22 +73,22 @@ fn test_set_storage() { #[test] fn test_return_value_into_mutable_reference() { - call_wasm_method::("test_return_value_into_mutable_reference"); + call_wasm_method::(&WASM_BINARY[..], "test_return_value_into_mutable_reference"); } #[test] fn 
test_get_and_return_array() { - call_wasm_method::("test_get_and_return_array"); + call_wasm_method::(&WASM_BINARY[..], "test_get_and_return_array"); } #[test] fn test_array_as_mutable_reference() { - call_wasm_method::("test_array_as_mutable_reference"); + call_wasm_method::(&WASM_BINARY[..], "test_array_as_mutable_reference"); } #[test] fn test_return_input_public_key() { - call_wasm_method::("test_return_input_public_key"); + call_wasm_method::(&WASM_BINARY[..], "test_return_input_public_key"); } #[test] @@ -93,7 +96,7 @@ fn test_return_input_public_key() { expected = "Instantiation: Export ext_test_api_return_input_version_1 not found" )] fn host_function_not_found() { - call_wasm_method::<()>("test_return_data"); + call_wasm_method::<()>(&WASM_BINARY[..], "test_return_data"); } #[test] @@ -104,30 +107,46 @@ fn host_function_not_found() { \\\"Invalid utf8 data provided\\\")) }\"" )] fn test_invalid_utf8_data_should_return_an_error() { - call_wasm_method::("test_invalid_utf8_data_should_return_an_error"); + call_wasm_method::(&WASM_BINARY[..], "test_invalid_utf8_data_should_return_an_error"); } #[test] fn test_overwrite_native_function_implementation() { - call_wasm_method::("test_overwrite_native_function_implementation"); + call_wasm_method::(&WASM_BINARY[..], "test_overwrite_native_function_implementation"); } #[test] fn test_u128_i128_as_parameter_and_return_value() { - call_wasm_method::("test_u128_i128_as_parameter_and_return_value"); + call_wasm_method::(&WASM_BINARY[..], "test_u128_i128_as_parameter_and_return_value"); } #[test] fn test_vec_return_value_memory_is_freed() { - call_wasm_method::("test_vec_return_value_memory_is_freed"); + call_wasm_method::(&WASM_BINARY[..], "test_vec_return_value_memory_is_freed"); } #[test] fn test_encoded_return_value_memory_is_freed() { - call_wasm_method::("test_encoded_return_value_memory_is_freed"); + call_wasm_method::(&WASM_BINARY[..], "test_encoded_return_value_memory_is_freed"); } #[test] fn test_array_return_value_memory_is_freed() { - call_wasm_method::("test_array_return_value_memory_is_freed"); + call_wasm_method::(&WASM_BINARY[..], "test_array_return_value_memory_is_freed"); +} + +#[test] +fn test_versionining_with_new_host_works() { + // We call to the new wasm binary with new host function. + call_wasm_method::( + &WASM_BINARY[..], + "test_versionning_works", + ); + + // we call to the old wasm binary with a new host functions + // old versions of host functions should be called and test should be ok! 
+ call_wasm_method::( + &WASM_BINARY_DEPRECATED[..], + "test_versionning_works", + ); } diff --git a/primitives/runtime-interface/tests/ui/no_duplicate_versions.rs b/primitives/runtime-interface/tests/ui/no_duplicate_versions.rs new file mode 100644 index 0000000000000000000000000000000000000000..948c327aa1a8b03c0b146031c013e07ca9b589f9 --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_duplicate_versions.rs @@ -0,0 +1,11 @@ +use sp_runtime_interface::runtime_interface; + +#[runtime_interface] +trait Test { + #[version(2)] + fn test() { } + #[version(2)] + fn test() { } +} + +fn main() {} diff --git a/primitives/runtime-interface/tests/ui/no_duplicate_versions.stderr b/primitives/runtime-interface/tests/ui/no_duplicate_versions.stderr new file mode 100644 index 0000000000000000000000000000000000000000..592dd9928c9687b875cc10d0d5ea15457c276c2d --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_duplicate_versions.stderr @@ -0,0 +1,11 @@ +error: Duplicated version attribute + --> $DIR/no_duplicate_versions.rs:7:2 + | +7 | #[version(2)] + | ^ + +error: Previous version with the same number defined here + --> $DIR/no_duplicate_versions.rs:5:2 + | +5 | #[version(2)] + | ^ diff --git a/primitives/runtime-interface/tests/ui/no_gaps_in_versions.rs b/primitives/runtime-interface/tests/ui/no_gaps_in_versions.rs new file mode 100644 index 0000000000000000000000000000000000000000..c468f48e37462015c7426f653e0012a528313a25 --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_gaps_in_versions.rs @@ -0,0 +1,17 @@ +use sp_runtime_interface::runtime_interface; + +#[runtime_interface] +trait Test { + #[version(1)] + fn test2() {} + #[version(2)] + fn test2() {} + #[version(3)] + fn test2() {} + + fn test() { } + #[version(3)] + fn test() { } +} + +fn main() {} diff --git a/primitives/runtime-interface/tests/ui/no_gaps_in_versions.stderr b/primitives/runtime-interface/tests/ui/no_gaps_in_versions.stderr new file mode 100644 index 0000000000000000000000000000000000000000..cdefcf60c56a8c4d27ab352e11cbd42d4198d241 --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_gaps_in_versions.stderr @@ -0,0 +1,5 @@ +error: Unexpected version attribute: missing version '2' for this function + --> $DIR/no_gaps_in_versions.rs:13:2 + | +13 | #[version(3)] + | ^ diff --git a/primitives/runtime-interface/tests/ui/no_generic_parameters.stderr b/primitives/runtime-interface/tests/ui/no_generic_parameters.stderr deleted file mode 100644 index c3e46655e5be790d126efa7a06efbbdd206866a9..0000000000000000000000000000000000000000 --- a/primitives/runtime-interface/tests/ui/no_generic_parameters.stderr +++ /dev/null @@ -1,11 +0,0 @@ -error: Generic parameters not supported. - --> $DIR/no_generic_parameters.rs:5:10 - | -5 | fn test() {} - | ^ - -error: Generic parameters not supported. 
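For contrast with the failing UI tests above, a sketch of an interface that satisfies the versioning rules they enforce (assuming the `#[version]` attribute as introduced in this change): versions are tracked per method name, an unattributed method defaults to version 1, and later versions must follow without gaps or duplicates.

```rust
use sp_runtime_interface::runtime_interface;

#[runtime_interface]
trait Valid {
    // No attribute: implicitly version 1.
    fn test() {}

    // Subsequent versions must be contiguous and unique.
    #[version(2)]
    fn test() {}

    #[version(3)]
    fn test() {}
}

fn main() {}
```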
- --> $DIR/no_generic_parameters.rs:4:12 - | -4 | trait Test { - | ^ diff --git a/primitives/runtime-interface/tests/ui/no_generic_parameters_method.rs b/primitives/runtime-interface/tests/ui/no_generic_parameters_method.rs new file mode 100644 index 0000000000000000000000000000000000000000..407942eb5ed8899d83a698f7af4b34c6f1ca4ee5 --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_generic_parameters_method.rs @@ -0,0 +1,8 @@ +use sp_runtime_interface::runtime_interface; + +#[runtime_interface] +trait Test { + fn test() {} +} + +fn main() {} diff --git a/primitives/runtime-interface/tests/ui/no_generic_parameters_method.stderr b/primitives/runtime-interface/tests/ui/no_generic_parameters_method.stderr new file mode 100644 index 0000000000000000000000000000000000000000..8a549753ac9f9abf5f9cf2e1e3cd4e05d1f4b2c4 --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_generic_parameters_method.stderr @@ -0,0 +1,5 @@ +error: Generic parameters not supported. + --> $DIR/no_generic_parameters_method.rs:5:10 + | +5 | fn test() {} + | ^ diff --git a/primitives/runtime-interface/tests/ui/no_generic_parameters.rs b/primitives/runtime-interface/tests/ui/no_generic_parameters_trait.rs similarity index 85% rename from primitives/runtime-interface/tests/ui/no_generic_parameters.rs rename to primitives/runtime-interface/tests/ui/no_generic_parameters_trait.rs index 17ddb00fab3b7a6c576518e452c6b4484ab8ed4d..35efac6761ca28acae520dca795e0995994b4f11 100644 --- a/primitives/runtime-interface/tests/ui/no_generic_parameters.rs +++ b/primitives/runtime-interface/tests/ui/no_generic_parameters_trait.rs @@ -2,7 +2,7 @@ use sp_runtime_interface::runtime_interface; #[runtime_interface] trait Test { - fn test() {} + fn test() {} } fn main() {} diff --git a/primitives/runtime-interface/tests/ui/no_generic_parameters_trait.stderr b/primitives/runtime-interface/tests/ui/no_generic_parameters_trait.stderr new file mode 100644 index 0000000000000000000000000000000000000000..794e30bca767d3a3c0aed33713d7baca5d268f28 --- /dev/null +++ b/primitives/runtime-interface/tests/ui/no_generic_parameters_trait.stderr @@ -0,0 +1,5 @@ +error: Generic parameters not supported. 
+ --> $DIR/no_generic_parameters_trait.rs:4:12 + | +4 | trait Test { + | ^ diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index fd40b9406aa23d9707f677af0fe48529b39aefa6..0e71e8becd61e4ea18a77e9ae4b5afcc031d731d 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-runtime" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,18 +12,18 @@ documentation = "https://docs.rs/sp-runtime" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../application-crypto" } -sp-arithmetic = { version = "2.0.0-alpha.2", default-features = false, path = "../arithmetic" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../io" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../core" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../application-crypto" } +sp-arithmetic = { version = "2.0.0-alpha.5", default-features = false, path = "../arithmetic" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../io" } log = { version = "0.4.8", optional = true } paste = "0.1.6" rand = { version = "0.7.2", optional = true } impl-trait-for-tuples = "0.1.3" -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../inherents" } -parity-util-mem = { version = "0.5.2", default-features = false, features = ["primitive-types"] } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../inherents" } +parity-util-mem = { version = "0.6.0", default-features = false, features = ["primitive-types"] } hash256-std-hasher = { version = "0.15.2", default-features = false } [dev-dependencies] @@ -32,6 +32,7 @@ rand = "0.7.2" [features] bench = [] +runtime-benchmarks = [] default = ["std"] std = [ "sp-application-crypto/std", @@ -47,3 +48,6 @@ std = [ "parity-util-mem/std", "hash256-std-hasher/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index e04ce77fb2cbb7dc1d5ed7d028e621799138153c..b00cbed6525a16cf6f1db35eed8f5b6e225437f3 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -16,7 +16,7 @@ //! Provides some utilities to define a piecewise linear function. -use crate::{Perbill, PerThing, traits::{AtLeast32Bit, SaturatedConversion}}; +use crate::{Perbill, traits::{AtLeast32Bit, SaturatedConversion}}; use core::ops::Sub; /// Piecewise Linear function in [0, 1] -> [0, 1]. 
diff --git a/primitives/runtime/src/generic/checked_extrinsic.rs b/primitives/runtime/src/generic/checked_extrinsic.rs index 25a8274354ae2aa60be8a97ff368aec2782abb2b..a329f334c0d7707edc8a4e07ada8eac443accce2 100644 --- a/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/primitives/runtime/src/generic/checked_extrinsic.rs @@ -18,10 +18,9 @@ //! stage. use crate::traits::{ - self, Member, MaybeDisplay, SignedExtension, Dispatchable, + self, Member, MaybeDisplay, SignedExtension, Dispatchable, DispatchInfoOf, ValidateUnsigned, }; -use crate::traits::ValidateUnsigned; -use crate::transaction_validity::TransactionValidity; +use crate::transaction_validity::{TransactionValidity, TransactionSource}; /// Definition of something that the external world might want to say; its /// existence implies that it has been checked and is good, particularly with @@ -36,47 +35,53 @@ pub struct CheckedExtrinsic { pub function: Call, } -impl traits::Applyable for +impl traits::Applyable for CheckedExtrinsic where AccountId: Member + MaybeDisplay, Call: Member + Dispatchable, - Extra: SignedExtension, + Extra: SignedExtension, Origin: From>, - Info: Clone, { type Call = Call; - type DispatchInfo = Info; fn validate>( &self, - info: Self::DispatchInfo, + // TODO [#5006;ToDr] should source be passed to `SignedExtension`s? + // Perhaps a change for 2.0 to avoid breaking too much APIs? + source: TransactionSource, + info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { if let Some((ref id, ref extra)) = self.signed { - Extra::validate(extra, id, &self.function, info.clone(), len) + Extra::validate(extra, id, &self.function, info, len) } else { let valid = Extra::validate_unsigned(&self.function, info, len)?; - let unsigned_validation = U::validate_unsigned(&self.function)?; + let unsigned_validation = U::validate_unsigned(source, &self.function)?; Ok(valid.combine_with(unsigned_validation)) } } fn apply>( self, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> crate::ApplyExtrinsicResult { let (maybe_who, pre) = if let Some((id, extra)) = self.signed { - let pre = Extra::pre_dispatch(extra, &id, &self.function, info.clone(), len)?; + let pre = Extra::pre_dispatch(extra, &id, &self.function, info, len)?; (Some(id), pre) } else { - let pre = Extra::pre_dispatch_unsigned(&self.function, info.clone(), len)?; + let pre = Extra::pre_dispatch_unsigned(&self.function, info, len)?; U::pre_dispatch(&self.function)?; (None, pre) }; let res = self.function.dispatch(Origin::from(maybe_who)); - Extra::post_dispatch(pre, info.clone(), len); - Ok(res.map_err(Into::into)) + let post_info = match res { + Ok(info) => info, + Err(err) => err.post_info, + }; + let res = res.map(|_| ()).map_err(|e| e.error); + Extra::post_dispatch(pre, info, &post_info, len, &res)?; + Ok(res) } } diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index a516bc1f7fa999f1087718f93f072ff8eff45099..3e9e52ba8bedac6417bcf5f9a49b79a172923229 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -357,7 +357,6 @@ mod tests { type AccountId = u64; type Call = (); type AdditionalSigned = (); - type DispatchInfo = (); type Pre = (); fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index 632deb13cbb45af6bd2271c2a5073b6ab05f0619..c80971b576df2d9c6312bb3df0966ebc942cc680 
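The `apply` change above follows a small pattern worth spelling out: both the `Ok` and the `Err` arm now carry post-dispatch information, which is extracted first, and the result is then flattened back to a plain `Result` before the extensions run. A self-contained miniature with stand-in types (not the real `DispatchErrorWithPostInfo`):

```rust
// Stand-ins for the runtime's post-dispatch info and its error-with-info type.
#[derive(Clone, Copy, Debug, PartialEq, Default)]
struct PostInfo {
    actual_weight: Option<u64>,
}

#[derive(Clone, Copy, Debug, PartialEq)]
struct ErrorWithPostInfo {
    post_info: PostInfo,
    error: &'static str,
}

// Pull the post-info out of either arm, then strip it from the result itself,
// mirroring the `match res { Ok(info) => .., Err(err) => err.post_info }` step
// followed by `res.map(|_| ()).map_err(|e| e.error)` in `apply`.
fn split(res: Result<PostInfo, ErrorWithPostInfo>) -> (PostInfo, Result<(), &'static str>) {
    let post_info = match res {
        Ok(info) => info,
        Err(err) => err.post_info,
    };
    (post_info, res.map(|_| ()).map_err(|e| e.error))
}

fn main() {
    let failed = Err(ErrorWithPostInfo {
        post_info: PostInfo { actual_weight: Some(7) },
        error: "dispatch failed",
    });
    let (post_info, result) = split(failed);
    assert_eq!(post_info.actual_weight, Some(7));
    assert_eq!(result, Err("dispatch failed"));
}
```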
100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -68,7 +68,10 @@ pub use sp_application_crypto::{RuntimeAppPublic, BoundToRuntimeAppPublic}; pub use sp_core::RuntimeDebug; /// Re-export top-level arithmetic stuff. -pub use sp_arithmetic::{Perquintill, Perbill, Permill, Percent, Rational128, Fixed64, PerThing}; +pub use sp_arithmetic::{ + Perquintill, Perbill, Permill, Percent, PerU16, Rational128, Fixed64, PerThing, + traits::SaturatedConversion, +}; /// Re-export 128 bit helpers. pub use sp_arithmetic::helpers_128bit; /// Re-export big_uint stuff. @@ -350,10 +353,15 @@ impl From for DispatchOutcome { } } -/// Result of a module function call; either nothing (functions are only called for "side effects") -/// or an error message. +/// This is the legacy return type of `Dispatchable`. It is still exposed for compatibilty +/// reasons. The new return type is `DispatchResultWithInfo`. +/// FRAME runtimes should use frame_support::dispatch::DispatchResult pub type DispatchResult = sp_std::result::Result<(), DispatchError>; +/// Return type of a `Dispatchable` which contains the `DispatchResult` and additional information +/// about the `Dispatchable` that is only known post dispatch. +pub type DispatchResultWithInfo = sp_std::result::Result>; + /// Reason why a dispatch call failed #[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize))] @@ -376,6 +384,18 @@ pub enum DispatchError { }, } +/// Result of a `Dispatchable` which contains the `DispatchResult` and additional information +/// about the `Dispatchable` that is only known post dispatch. +#[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, RuntimeDebug)] +pub struct DispatchErrorWithPostInfo where + Info: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +{ + /// Addditional information about the `Dispatchable` which is only known post dispatch. + pub post_info: Info, + /// The actual `DispatchResult` indicating whether the dispatch was succesfull. + pub error: DispatchError, +} + impl DispatchError { /// Return the same error but without the attached message. pub fn stripped(self) -> Self { @@ -387,6 +407,18 @@ impl DispatchError { } } +impl From for DispatchErrorWithPostInfo where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable + Default, + E: Into +{ + fn from(error: E) -> Self { + Self { + post_info: Default::default(), + error: error.into(), + } + } +} + impl From for DispatchError { fn from(_: crate::traits::LookupError) -> Self { Self::CannotLookup @@ -416,6 +448,14 @@ impl From for &'static str { } } +impl From> for &'static str where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +{ + fn from(err: DispatchErrorWithPostInfo) -> &'static str { + err.error.into() + } +} + impl traits::Printable for DispatchError { fn print(&self) { "DispatchError".print(); @@ -434,6 +474,16 @@ impl traits::Printable for DispatchError { } } +impl traits::Printable for DispatchErrorWithPostInfo where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +{ + fn print(&self) { + self.error.print(); + "PostInfo: ".print(); + self.post_info.print(); + } +} + /// This type specifies the outcome of dispatching a call to a module. /// /// In case of failure an error specific to the module is returned. 
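A short usage sketch of the conversion path added here. It assumes the `sp-runtime` API exactly as introduced in this diff and instantiates the post-info type with `()` purely for illustration (any type meeting the declared bounds plus `Default` would do): thanks to the blanket `From` impl, `?` on a plain `DispatchError` fills in a default post-info automatically.

```rust
use sp_runtime::{DispatchError, DispatchErrorWithPostInfo};

fn dispatch() -> Result<(), DispatchErrorWithPostInfo<()>> {
    let plain: Result<(), DispatchError> = Err(DispatchError::Other("something went wrong"));
    // `?` goes through the blanket `From<E>` impl for `DispatchErrorWithPostInfo`,
    // which keeps the error and sets `post_info: Default::default()`.
    plain?;
    Ok(())
}

fn main() {
    let err = dispatch().unwrap_err();
    assert_eq!(err.error, DispatchError::Other("something went wrong"));
    assert_eq!(err.post_info, ());
}
```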
diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 24390700542112b7083b17025e01ec167e43a995..1414a5f4f0a753d02abc6ed2c25c574bfbe531f7 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -21,13 +21,13 @@ use std::{fmt::Debug, ops::Deref, fmt, cell::RefCell}; use crate::codec::{Codec, Encode, Decode}; use crate::traits::{ self, Checkable, Applyable, BlakeTwo256, OpaqueKeys, - SignedExtension, Dispatchable, + SignedExtension, Dispatchable, DispatchInfoOf, }; use crate::traits::ValidateUnsigned; use crate::{generic, KeyTypeId, ApplyExtrinsicResult}; pub use sp_core::{H256, sr25519}; use sp_core::{crypto::{CryptoType, Dummy, key_types, Public}, U256}; -use crate::transaction_validity::{TransactionValidity, TransactionValidityError}; +use crate::transaction_validity::{TransactionValidity, TransactionValidityError, TransactionSource}; /// Authority Id #[derive(Default, PartialEq, Eq, Clone, Encode, Decode, Debug, Hash, Serialize, Deserialize, PartialOrd, Ord)] @@ -345,19 +345,18 @@ impl traits::Extrinsic for TestXt } } -impl Applyable for TestXt where +impl Applyable for TestXt where Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable, - Extra: SignedExtension, + Extra: SignedExtension, Origin: From>, - Info: Clone, { type Call = Call; - type DispatchInfo = Info; /// Checks to see if this is a valid *transaction*. It returns information on it if so. fn validate>( &self, - _info: Self::DispatchInfo, + _source: TransactionSource, + _info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { Ok(Default::default()) @@ -367,7 +366,7 @@ impl Applyable for TestXt where /// index and sender. fn apply>( self, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> ApplyExtrinsicResult { let maybe_who = if let Some((who, extra)) = self.signature { @@ -378,6 +377,6 @@ impl Applyable for TestXt where None }; - Ok(self.call.dispatch(maybe_who.into()).map_err(Into::into)) + Ok(self.call.dispatch(maybe_who.into()).map(|_| ()).map_err(|e| e.error)) } } diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index 39e015505b20b3f5aecb7789c1144466e8ac42d3..fdf1d6396d26f84f54d891240fff310c966aa1c9 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -17,7 +17,7 @@ //! Primitives for the runtime modules. use sp_std::prelude::*; -use sp_std::{self, result, marker::PhantomData, convert::{TryFrom, TryInto}, fmt::Debug}; +use sp_std::{self, marker::PhantomData, convert::{TryFrom, TryInto}, fmt::Debug}; use sp_io; #[cfg(feature = "std")] use std::fmt::Display; @@ -28,7 +28,8 @@ use serde::{Serialize, Deserialize, de::DeserializeOwned}; use sp_core::{self, Hasher, TypeId, RuntimeDebug}; use crate::codec::{Codec, Encode, Decode}; use crate::transaction_validity::{ - ValidTransaction, TransactionValidity, TransactionValidityError, UnknownTransaction, + ValidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, + UnknownTransaction, }; use crate::generic::{Digest, DigestItem}; pub use sp_arithmetic::traits::{ @@ -38,6 +39,7 @@ pub use sp_arithmetic::traits::{ }; use sp_application_crypto::AppKey; use impl_trait_for_tuples::impl_for_tuples; +use crate::DispatchResult; /// A lazy value. pub trait Lazy { @@ -146,18 +148,6 @@ impl From for &'static str { } } -/// Some sort of check on the origin is performed by this object. -pub trait EnsureOrigin { - /// A return type. - type Success; - /// Perform the origin check. 
- fn ensure_origin(o: OuterOrigin) -> result::Result { - Self::try_origin(o).map_err(|_| BadOrigin) - } - /// Perform the origin check. - fn try_origin(o: OuterOrigin) -> result::Result; -} - /// An error that indicates that a lookup failed. #[derive(Encode, Decode, RuntimeDebug)] pub struct LookupError; @@ -329,51 +319,6 @@ impl > SimpleBitOps for T {} -/// The block finalization trait. Implementing this lets you express what should happen -/// for your module when the block is ending. -#[impl_for_tuples(30)] -pub trait OnFinalize { - /// The block is being finalized. Implement to have something happen. - fn on_finalize(_n: BlockNumber) {} -} - -/// The block initialization trait. Implementing this lets you express what should happen -/// for your module when the block is beginning (right before the first extrinsic is executed). -#[impl_for_tuples(30)] -pub trait OnInitialize { - /// The block is being initialized. Implement to have something happen. - fn on_initialize(_n: BlockNumber) {} -} - -/// The runtime upgrade trait. Implementing this lets you express what should happen -/// when the runtime upgrades, and changes may need to occur to your module. -#[impl_for_tuples(30)] -pub trait OnRuntimeUpgrade { - /// Perform a module upgrade. - fn on_runtime_upgrade() {} -} - -/// Off-chain computation trait. -/// -/// Implementing this trait on a module allows you to perform long-running tasks -/// that make (by default) validators generate transactions that feed results -/// of those long-running computations back on chain. -/// -/// NOTE: This function runs off-chain, so it can access the block state, -/// but cannot preform any alterations. More specifically alterations are -/// not forbidden, but they are not persisted in any way after the worker -/// has finished. -#[impl_for_tuples(30)] -pub trait OffchainWorker { - /// This function is being called after every block import (when fully synced). - /// - /// Implement this and use any of the `Offchain` `sp_io` set of APIs - /// to perform off-chain computations, calls and submit transactions - /// with results to trigger any on-chain changes. - /// Any state alterations are lost and are not persisted. - fn offchain_worker(_n: BlockNumber) {} -} - /// Abstraction around hashing // Stupid bug in the Rust compiler believes derived // traits must be fulfilled by all type parameters. @@ -680,8 +625,30 @@ pub trait Dispatchable { type Origin; /// ... type Trait; - /// Actually dispatch this call and result the result of it. - fn dispatch(self, origin: Self::Origin) -> crate::DispatchResult; + /// An opaque set of information attached to the transaction. This could be constructed anywhere + /// down the line in a runtime. The current Substrate runtime uses a struct with the same name + /// to represent the dispatch class and weight. + type Info; + /// Additional information that is returned by `dispatch`. Can be used to supply the caller + /// with information about a `Dispatchable` that is only known post dispatch. + type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; + /// Actually dispatch this call and return the result of it. + fn dispatch(self, origin: Self::Origin) -> crate::DispatchResultWithInfo; +} + +/// Shortcut to reference the `Info` type of a `Dispatchable`. +pub type DispatchInfoOf = ::Info; +/// Shortcut to reference the `PostInfo` type of a `Dispatchable`.
+pub type PostDispatchInfoOf = ::PostInfo; + +impl Dispatchable for () { + type Origin = (); + type Trait = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::Origin) -> crate::DispatchResultWithInfo { + panic!("This implementation should not be used for actual dispatch."); + } } /// Means by which a transaction may be extended. This type embodies both the data and the logic @@ -697,7 +664,7 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq type AccountId; /// The type which encodes the call to be dispatched. - type Call; + type Call: Dispatchable; /// Any additional data that will go into the signed payload. This may be created dynamically /// from the transaction using the `additional_signed` function. @@ -706,11 +673,6 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq /// The type that encodes information that can be passed from pre_dispatch to post-dispatch. type Pre: Default; - /// An opaque set of information attached to the transaction. This could be constructed anywhere - /// down the line in a runtime. The current Substrate runtime uses a struct with the same name - /// to represent the dispatch class and weight. - type DispatchInfo: Clone; - /// Construct any additional data that should be in the signed payload of the transaction. Can /// also perform any pre-signature-verification checks and return an error if needed. fn additional_signed(&self) -> Result; @@ -728,7 +690,7 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq &self, _who: &Self::AccountId, _call: &Self::Call, - _info: Self::DispatchInfo, + _info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { Ok(ValidTransaction::default()) @@ -746,7 +708,7 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq self, who: &Self::AccountId, call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> Result { self.validate(who, call, info.clone(), len) @@ -764,7 +726,7 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq /// Make sure to perform the same checks in `pre_dispatch_unsigned` function. fn validate_unsigned( _call: &Self::Call, - _info: Self::DispatchInfo, + _info: &DispatchInfoOf, _len: usize, ) -> TransactionValidity { Ok(ValidTransaction::default()) @@ -780,7 +742,7 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq /// perform the same validation as in `validate_unsigned`. fn pre_dispatch_unsigned( call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> Result { Self::validate_unsigned(call, info.clone(), len) @@ -788,8 +750,28 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq .map_err(Into::into) } - /// Do any post-flight stuff for a transaction. - fn post_dispatch(_pre: Self::Pre, _info: Self::DispatchInfo, _len: usize) { } + /// Do any post-flight stuff for an extrinsic. + /// + /// This gets given the `DispatchResult` `_result` from the extrinsic and can, if desired, + /// introduce a `TransactionValidityError`, causing the block to become invalid for including + /// it. + /// + /// WARNING: It is dangerous to return an error here. To do so will fundamentally invalidate the + /// transaction and any block that it is included in, causing the block author to not be + /// compensated for their work in validating the transaction or producing the block so far.
+ /// + /// It can only be used safely when you *know* that the extrinsic is one that can only be + /// introduced by the current block author; generally this implies that it is an inherent and + /// will come from either an offchain-worker or via `InherentData`. + fn post_dispatch( + _pre: Self::Pre, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } /// Returns the list of unique identifier for this signed extension. /// @@ -804,11 +786,10 @@ pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq } #[impl_for_tuples(1, 12)] -impl SignedExtension for Tuple { - for_tuples!( where #( Tuple: SignedExtension )* ); +impl SignedExtension for Tuple { + for_tuples!( where #( Tuple: SignedExtension )* ); type AccountId = AccountId; type Call = Call; - type DispatchInfo = Info; const IDENTIFIER: &'static str = "You should call `identifier()`!"; for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); @@ -821,44 +802,47 @@ impl SignedExtension for Tuple { &self, who: &Self::AccountId, call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple.validate(who, call, info.clone(), len)?); )* ); + for_tuples!( #( let valid = valid.combine_with(Tuple.validate(who, call, info, len)?); )* ); Ok(valid) } - fn pre_dispatch(self, who: &Self::AccountId, call: &Self::Call, info: Self::DispatchInfo, len: usize) + fn pre_dispatch(self, who: &Self::AccountId, call: &Self::Call, info: &DispatchInfoOf, len: usize) -> Result { - Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info.clone(), len)? ),* ) )) + Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info, len)? ),* ) )) } fn validate_unsigned( call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> TransactionValidity { let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple::validate_unsigned(call, info.clone(), len)?); )* ); + for_tuples!( #( let valid = valid.combine_with(Tuple::validate_unsigned(call, info, len)?); )* ); Ok(valid) } fn pre_dispatch_unsigned( call: &Self::Call, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> Result { - Ok(for_tuples!( ( #( Tuple::pre_dispatch_unsigned(call, info.clone(), len)? ),* ) )) + Ok(for_tuples!( ( #( Tuple::pre_dispatch_unsigned(call, info, len)? ),* ) )) } fn post_dispatch( pre: Self::Pre, - info: Self::DispatchInfo, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, len: usize, - ) { - for_tuples!( #( Tuple::post_dispatch(pre.Tuple, info.clone(), len); )* ) + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + for_tuples!( #( Tuple::post_dispatch(pre.Tuple, info, post_info, len, result)?; )* ); + Ok(()) } fn identifier() -> Vec<&'static str> { @@ -875,7 +859,6 @@ impl SignedExtension for () { type AdditionalSigned = (); type Call = (); type Pre = (); - type DispatchInfo = (); const IDENTIFIER: &'static str = "UnitSignedExtension"; fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } } @@ -888,15 +871,13 @@ impl SignedExtension for () { /// each piece of attributable information to be disambiguated. pub trait Applyable: Sized + Send + Sync { /// Type by which we can dispatch. 
Restricts the `UnsignedValidator` type. - type Call; - - /// An opaque set of information attached to the transaction. - type DispatchInfo: Clone; + type Call: Dispatchable; /// Checks to see if this is a valid *transaction*. It returns information on it if so. fn validate>( &self, - info: Self::DispatchInfo, + source: TransactionSource, + info: &DispatchInfoOf, len: usize, ) -> TransactionValidity; @@ -904,7 +885,7 @@ pub trait Applyable: Sized + Send + Sync { /// index and sender. fn apply>( self, - info: Self::DispatchInfo, + info: &DispatchInfoOf, len: usize, ) -> crate::ApplyExtrinsicResult; } @@ -942,7 +923,7 @@ pub trait ValidateUnsigned { /// /// Changes made to storage WILL be persisted if the call returns `Ok`. fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - Self::validate_unsigned(call) + Self::validate_unsigned(TransactionSource::InBlock, call) .map(|_| ()) .map_err(Into::into) } @@ -953,7 +934,7 @@ pub trait ValidateUnsigned { /// whether the transaction would panic if it were included or not. /// /// Changes made to storage should be discarded by caller. - fn validate_unsigned(call: &Self::Call) -> TransactionValidity; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity; } /// Opaque data type that may be destructured into a series of raw byte slices (which represent @@ -966,7 +947,7 @@ pub trait OpaqueKeys: Clone { fn key_ids() -> &'static [crate::KeyTypeId]; /// Get the raw bytes of key with key-type ID `i`. fn get_raw(&self, i: super::KeyTypeId) -> &[u8]; - /// Get the decoded key with index `i`. + /// Get the decoded key with key-type ID `i`. fn get(&self, i: super::KeyTypeId) -> Option { T::decode(&mut self.get_raw(i)).ok() } @@ -1293,6 +1274,22 @@ impl Printable for &str { } } +impl Printable for bool { + fn print(&self) { + if *self { + "true".print() + } else { + "false".print() + } + } +} + +impl Printable for () { + fn print(&self) { + "()".print() + } +} + #[impl_for_tuples(1, 12)] impl Printable for Tuple { fn print(&self) { diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs index 6dfb80b6d7e4568b0bb0edabe51592e0506261c9..95903b4876262d29b97d7362782eed07290aa4cf 100644 --- a/primitives/runtime/src/transaction_validity.rs +++ b/primitives/runtime/src/transaction_validity.rs @@ -52,6 +52,13 @@ pub enum InvalidTransaction { ExhaustsResources, /// Any other custom invalid validity that is not covered by this enum. Custom(u8), + /// An extrinsic with a Mandatory dispatch resulted in Error. This is indicative of either a + /// malicious validator or a buggy `provide_inherent`. In any case, it can result in dangerously + /// overweight blocks and therefore if found, invalidates the block. + BadMandatory, + /// A transaction with a mandatory dispatch. This is invalid; only inherent extrinsics are + /// allowed to have mandatory dispatches. + MandatoryDispatch, } impl InvalidTransaction { @@ -62,6 +69,14 @@ impl InvalidTransaction { _ => false, } } + + /// Returns if the reason for the invalidity was a mandatory call failing. + pub fn was_mandatory(&self) -> bool { + match self { + Self::BadMandatory => true, + _ => false, + } + } } impl From for &'static str { @@ -76,6 +91,10 @@ impl From for &'static str { "Transaction would exhausts the block limits", InvalidTransaction::Payment => "Inability to pay some fees (e.g. 
account balance too low)", + InvalidTransaction::BadMandatory => + "A call was labelled as mandatory, but resulted in an Error.", + InvalidTransaction::MandatoryDispatch => + "Transaction dispatch is mandatory; transactions may not have mandatory dispatches.", InvalidTransaction::Custom(_) => "InvalidTransaction custom error", } } @@ -123,6 +142,15 @@ impl TransactionValidityError { Self::Unknown(_) => false, } } + + /// Returns `true` if the reason for the error was it being a mandatory dispatch that could not + /// be completed successfully. + pub fn was_mandatory(&self) -> bool { + match self { + Self::Invalid(e) => e.was_mandatory(), + Self::Unknown(_) => false, + } + } } impl From for &'static str { @@ -161,6 +189,35 @@ impl Into for UnknownTransaction { } } +/// The source of the transaction. +/// +/// Depending on the source we might apply different validation schemes. +/// For instance we can disallow specific kinds of transactions if they were not produced +/// by our local node (for instance off-chain workers). +#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, parity_util_mem::MallocSizeOf)] +pub enum TransactionSource { + /// Transaction is already included in block. + /// + /// This means that we can't really tell where the transaction is coming from, + /// since it's already in the received block. Note that the custom validation logic + /// using either `Local` or `External` should most likely just allow `InBlock` + /// transactions as well. + InBlock, + + /// Transaction is coming from a local source. + /// + /// This means that the transaction was produced internally by the node + /// (for instance an Off-Chain Worker, or an Off-Chain Call), as opposed + /// to being received over the network. + Local, + + /// Transaction has been received externally. + /// + /// This means the transaction has been received from a (usually) "untrusted" source, + /// for instance received over the network or RPC. + External, +} + /// Information concerning a valid transaction. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct ValidTransaction { @@ -207,6 +264,17 @@ impl Default for ValidTransaction { } } impl ValidTransaction { + /// Initiate `ValidTransaction` builder object with a particular prefix for tags. + /// + /// To avoid conflicts between different parts in runtime it's recommended to build `requires` + /// and `provides` tags with a unique prefix. + pub fn with_tag_prefix(prefix: &'static str) -> ValidTransactionBuilder { + ValidTransactionBuilder { + prefix: Some(prefix), + validity: Default::default(), + } + } + /// Combine two instances into one, as a best effort. This will take the superset of each of the /// `provides` and `requires` tags, it will sum the priorities, take the minimum longevity and /// the logic *And* of the propagate flags. @@ -221,6 +289,104 @@ impl ValidTransaction { } } +/// `ValidTransaction` builder. +/// +/// +/// Allows to easily construct `ValidTransaction` and most importantly takes care of +/// prefixing `requires` and `provides` tags to avoid conflicts. +#[derive(Default, Clone, RuntimeDebug)] +pub struct ValidTransactionBuilder { + prefix: Option<&'static str>, + validity: ValidTransaction, +} + +impl ValidTransactionBuilder { + /// Set the priority of a transaction. + /// + /// Note that the final priority for `FRAME` is combined from all `SignedExtension`s. + /// Most likely for unsigned transactions you want the priority to be higher + /// than for regular transactions.
We recommend exposing a base priority for unsigned + /// transactions as a runtime module parameter, so that the runtime can tune inter-module + /// priorities. + pub fn priority(mut self, priority: TransactionPriority) -> Self { + self.validity.priority = priority; + self + } + + /// Set the longevity of a transaction. + /// + /// By default the transaction will be considered valid forever and will not be revalidated + /// by the transaction pool. It's recommended, though, to set the longevity to a finite value. + /// If unsure, it's also reasonable to expose this parameter via module configuration + /// and let the runtime decide. + pub fn longevity(mut self, longevity: TransactionLongevity) -> Self { + self.validity.longevity = longevity; + self + } + + /// Set the propagate flag. + /// + /// Set to `false` if the transaction is not meant to be gossiped to peers. Combined with + /// `TransactionSource::Local` validation it can be used to have special kinds of + /// transactions that are only produced and included by the validator nodes. + pub fn propagate(mut self, propagate: bool) -> Self { + self.validity.propagate = propagate; + self + } + + /// Add a `TransactionTag` to the set of required tags. + /// + /// The tag will be encoded and prefixed with the module prefix (if any). + /// If you'd rather add a raw `require` tag, consider using the `combine_with` method. + pub fn and_requires(mut self, tag: impl Encode) -> Self { + self.validity.requires.push(match self.prefix.as_ref() { + Some(prefix) => (prefix, tag).encode(), + None => tag.encode(), + }); + self + } + + /// Add a `TransactionTag` to the set of provided tags. + /// + /// The tag will be encoded and prefixed with the module prefix (if any). + /// If you'd rather add a raw `provides` tag, consider using the `combine_with` method. + pub fn and_provides(mut self, tag: impl Encode) -> Self { + self.validity.provides.push(match self.prefix.as_ref() { + Some(prefix) => (prefix, tag).encode(), + None => tag.encode(), + }); + self + } + + /// Augment the builder with an existing `ValidTransaction`. + /// + /// This method does not add the prefix to `require` or `provides` tags. + pub fn combine_with(mut self, validity: ValidTransaction) -> Self { + self.validity = core::mem::take(&mut self.validity).combine_with(validity); + self + } + + /// Finalize the builder and produce `TransactionValidity`. + /// + /// Note the result will always be `Ok`. Use `Into` to produce `ValidTransaction`.
+ pub fn build(self) -> TransactionValidity { + self.into() + } +} + +impl From for TransactionValidity { + fn from(builder: ValidTransactionBuilder) -> Self { + Ok(builder.into()) + } +} + +impl From for ValidTransaction { + fn from(builder: ValidTransactionBuilder) -> Self { + builder.validity + } +} + + #[cfg(test)] mod tests { use super::*; @@ -244,4 +410,26 @@ mod tests { // decode back assert_eq!(TransactionValidity::decode(&mut &*encoded), Ok(v)); } + + #[test] + fn builder_should_prefix_the_tags() { + const PREFIX: &str = "test"; + let a: ValidTransaction = ValidTransaction::with_tag_prefix(PREFIX) + .and_requires(1) + .and_requires(2) + .and_provides(3) + .and_provides(4) + .propagate(false) + .longevity(5) + .priority(3) + .priority(6) + .into(); + assert_eq!(a, ValidTransaction { + propagate: false, + longevity: 5, + priority: 6, + requires: vec![(PREFIX, 1).encode(), (PREFIX, 2).encode()], + provides: vec![(PREFIX, 3).encode(), (PREFIX, 4).encode()], + }); + } } diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index 060801a29e41e28df705a58353aca85baefc19ac..5c1595027f86fd129ebec97ffebe63b2b1263ca7 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-sandbox" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,11 +10,11 @@ description = "This crate provides means to instantiate and execute wasm modules [dependencies] wasmi = { version = "0.6.2", optional = true } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../io" } -sp-wasm-interface = { version = "2.0.0-alpha.2", default-features = false, path = "../wasm-interface" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../io" } +sp-wasm-interface = { version = "2.0.0-alpha.5", default-features = false, path = "../wasm-interface" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } [dev-dependencies] wabt = "0.9.2" @@ -31,3 +31,6 @@ std = [ "sp-wasm-interface/std", ] strict = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index 39f78c078b0caa8ee28a079155910005fcbf3916..75263321b805c85793e09f90ec0616ab1a6c4203 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-serializer" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,3 +12,6 @@ documentation = "https://docs.rs/sp-serializer" [dependencies] serde = "1.0.101" serde_json = "1.0.41" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml index d01f7ee440cf12e01a9cbca40c03d9082c34d073..ffe1bc327f7ffd9edb5f3c003abeb0f4d66077f3 100644 --- a/primitives/session/Cargo.toml +++ b/primitives/session/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-session" -version = 
"2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,11 +9,14 @@ repository = "https://github.com/paritytech/substrate/" description = "Primitives for sessions" [dependencies] -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } -sp-runtime = { version = "2.0.0-alpha.2", optional = true, path = "../runtime" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../api" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../core" } +sp-runtime = { version = "2.0.0-alpha.5", optional = true, path = "../runtime" } [features] default = [ "std" ] std = [ "sp-api/std", "sp-std/std", "sp-runtime", "sp-core/std" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml index 2f85b8251b0ae4035aa98cceeaf56a6737c7cf21..60bf3f759e8dfcf0f3d2efe6bec7a3d231d291b8 100644 --- a/primitives/staking/Cargo.toml +++ b/primitives/staking/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-staking" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,9 +9,9 @@ repository = "https://github.com/paritytech/substrate/" description = "A crate which contains primitives that are useful for implementation that uses staking approaches in general. Definitions related to sessions, slashing, etc go here." [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } [features] default = ["std"] @@ -20,3 +20,6 @@ std = [ "sp-runtime/std", "sp-std/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index 06e73f018b7656a691801b93b2178631cacd5ddd..584f3a75ea3abc2befd80821dba2f6b7349b2c5d 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -142,11 +142,20 @@ pub trait OnOffenceHandler { /// Zero is a valid value for a fraction. /// /// The `session` parameter is the session index of the offence. + /// + /// The receiver might decide to not accept this offence. In this case, the call site is + /// responsible for queuing the report and re-submitting again. fn on_offence( offenders: &[OffenceDetails], slash_fraction: &[Perbill], session: SessionIndex, - ); + ) -> Result<(), ()>; + + /// Can an offence be reported now or not. This is an method to short-circuit a call into + /// `on_offence`. Ideally, a correct implementation should return `false` if `on_offence` will + /// return `Err`. Nonetheless, this is up to the implementation and this trait cannot guarantee + /// it. 
+ fn can_report() -> bool; } impl OnOffenceHandler for () { @@ -154,7 +163,9 @@ impl OnOffenceHandler for () { _offenders: &[OffenceDetails], _slash_fraction: &[Perbill], _session: SessionIndex, - ) {} + ) -> Result<(), ()> { Ok(()) } + + fn can_report() -> bool { true } } /// A details about an offending authority for a particular kind of offence. diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 16f921c3ac137171d7a90f7820b8c41dcc382457..548f3f43086e928986dd5c546f105e86e2ebf0c1 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-state-machine" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2018" @@ -15,17 +15,20 @@ parking_lot = "0.10.0" hash-db = "0.15.2" trie-db = "0.20.0" trie-root = "0.16.0" -sp-trie = { version = "2.0.0-alpha.2", path = "../trie" } -sp-core = { version = "2.0.0-alpha.2", path = "../core" } -sp-panic-handler = { version = "2.0.0-alpha.2", path = "../panic-handler" } -codec = { package = "parity-scale-codec", version = "1.2.0" } +sp-trie = { version = "2.0.0-alpha.5", path = "../trie" } +sp-core = { version = "2.0.0-alpha.5", path = "../core" } +sp-panic-handler = { version = "2.0.0-alpha.5", path = "../panic-handler" } +codec = { package = "parity-scale-codec", version = "1.3.0" } num-traits = "0.2.8" rand = "0.7.2" -sp-externalities = { version = "0.8.0-alpha.2", path = "../externalities" } +sp-externalities = { version = "0.8.0-alpha.5", path = "../externalities" } [dev-dependencies] hex-literal = "0.2.1" -sp-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../runtime" } [features] default = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 4fb59556e3fe46f582fc9435b9b8b6a4006cb67f..94144fdb90fda5e04746e584993a678698b8e5f6 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -206,13 +206,16 @@ pub trait Backend: std::fmt::Debug { (root, txs) } + /// Register stats from overlay of state machine. + /// + /// By default nothing is registered. + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats); + /// Query backend usage statistics (i/o, memory) /// /// Not all implementations are expected to be able to do this. In the /// case when they don't, empty statistics is returned. - fn usage_info(&self) -> UsageInfo { - UsageInfo::empty() - } + fn usage_info(&self) -> UsageInfo; /// Wipe the state database. fn wipe(&self) -> Result<(), Self::Error> { @@ -308,10 +311,12 @@ impl<'a, T: Backend, H: Hasher> Backend for &'a T { (*self).for_key_values_with_prefix(prefix, f); } + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } + fn usage_info(&self) -> UsageInfo { (*self).usage_info() } - } +} /// Trait that allows consolidate two transactions together. 
pub trait Consolidate { diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index c731d4104b260ef1680d6f800c9812544c03ab38..39ad81ed59a0eb3dc6746f922be36f334299801a 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -469,6 +469,7 @@ mod test { ].into_iter().collect(), }, collect_extrinsics: true, + stats: Default::default(), }; let config = Configuration { digest_interval: 4, digest_levels: 2 }; diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index bbb25355a8ccef1a8c9233d98ce5c057d275e1de..133af7ccd9b5f1234b4b9e8a823ea56e43953d8a 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -573,7 +573,6 @@ mod tests { const CHILD_UUID_1: &[u8] = b"unique_id_1"; const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - fn prepare_overlay_with_changes() -> OverlayedChanges { OverlayedChanges { prospective: vec![ @@ -588,6 +587,7 @@ mod tests { ].into_iter().collect(), committed: Default::default(), collect_extrinsics: true, + stats: Default::default(), } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 7e474d45b650e480f35ee82bcae34828878a2ab3..ecd4532cf2640d5e590e6f7447cb84ec62e01bd0 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -20,6 +20,7 @@ use crate::{ StorageKey, StorageValue, StorageCollection, trie_backend::TrieBackend, backend::{Backend, insert_into_memory_db}, + stats::UsageInfo, }; use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; use hash_db::Hasher; @@ -357,6 +358,16 @@ impl Backend for InMemory where H::Out: Codec { self.trie = Some(TrieBackend::new(mdb, root)); self.trie.as_ref() } + + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } + + fn usage_info(&self) -> UsageInfo { + UsageInfo::empty() + } + + fn wipe(&self) -> Result<(), Self::Error> { + Ok(()) + } } #[cfg(test)] diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 4d80ee37c98d1d2b69ddaf75dec91f4b44053d90..9a2dc52cca20a91ef3b545f0a7eec3ad2220e6a0 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -18,7 +18,7 @@ #![warn(missing_docs)] -use std::{fmt, result, collections::HashMap, panic::UnwindSafe, marker::PhantomData}; +use std::{fmt, result, collections::HashMap, panic::UnwindSafe}; use log::{warn, trace}; use hash_db::Hasher; use codec::{Decode, Encode, Codec}; @@ -73,7 +73,8 @@ pub use trie_backend_essence::{TrieBackendStorage, Storage}; pub use trie_backend::TrieBackend; pub use error::{Error, ExecutionError}; pub use in_memory_backend::InMemory as InMemoryBackend; -pub use stats::{UsageInfo, UsageUnit}; +pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; +pub use sp_core::traits::CloneableSpawn; type CallResult = Result, E>; @@ -188,9 +189,19 @@ pub struct StateMachine<'a, B, H, N, Exec> overlay: &'a mut OverlayedChanges, extensions: Extensions, changes_trie_state: Option>, - _marker: PhantomData<(H, N)>, storage_transaction_cache: Option<&'a mut StorageTransactionCache>, runtime_code: &'a RuntimeCode<'a>, + stats: StateMachineStats, +} + +impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> where + H: Hasher, + B: Backend, + N: ChangesTrieBlockNumber, +{ + fn drop(&mut 
self) { + self.backend.register_overlay_stats(&self.stats); + } } impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where @@ -210,8 +221,10 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where call_data: &'a [u8], mut extensions: Extensions, runtime_code: &'a RuntimeCode, + spawn_handle: Box, ) -> Self { extensions.register(CallInWasmExt::new(exec.clone())); + extensions.register(sp_core::traits::TaskExecutorExt::new(spawn_handle)); Self { backend, @@ -221,9 +234,9 @@ impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where extensions, overlay, changes_trie_state, - _marker: PhantomData, storage_transaction_cache: None, runtime_code, + stats: StateMachineStats::default(), } } @@ -437,6 +450,7 @@ pub fn prove_execution( mut backend: B, overlay: &mut OverlayedChanges, exec: &Exec, + spawn_handle: Box, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -454,6 +468,7 @@ where trie_backend, overlay, exec, + spawn_handle, method, call_data, runtime_code, @@ -473,6 +488,7 @@ pub fn prove_execution_on_trie_backend( trie_backend: &TrieBackend, overlay: &mut OverlayedChanges, exec: &Exec, + spawn_handle: Box, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -494,6 +510,7 @@ where call_data, Extensions::default(), runtime_code, + spawn_handle, ); let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( @@ -510,6 +527,7 @@ pub fn execution_proof_check( proof: StorageProof, overlay: &mut OverlayedChanges, exec: &Exec, + spawn_handle: Box, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -525,6 +543,7 @@ where &trie_backend, overlay, exec, + spawn_handle, method, call_data, runtime_code, @@ -536,6 +555,7 @@ pub fn execution_proof_check_on_trie_backend( trie_backend: &TrieBackend, H>, overlay: &mut OverlayedChanges, exec: &Exec, + spawn_handle: Box, method: &str, call_data: &[u8], runtime_code: &RuntimeCode, @@ -555,6 +575,7 @@ where call_data, Extensions::default(), runtime_code, + spawn_handle, ); sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( @@ -820,6 +841,7 @@ mod tests { &[], Default::default(), &wasm_code, + sp_core::tasks::executor(), ); assert_eq!( @@ -849,6 +871,7 @@ mod tests { &[], Default::default(), &wasm_code, + sp_core::tasks::executor(), ); assert_eq!(state_machine.execute(ExecutionStrategy::NativeElseWasm).unwrap(), vec![66]); @@ -875,6 +898,7 @@ mod tests { &[], Default::default(), &wasm_code, + sp_core::tasks::executor(), ); assert!( @@ -905,6 +929,7 @@ mod tests { remote_backend, &mut Default::default(), &executor, + sp_core::tasks::executor(), "test", &[], &RuntimeCode::empty(), @@ -916,6 +941,7 @@ mod tests { remote_proof, &mut Default::default(), &executor, + sp_core::tasks::executor(), "test", &[], &RuntimeCode::empty(), diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 4eb44de7c58624a56c14ecd932edab5ca7ed3c74..ab50c61391bdb9f696bba1db1ca4d7874e9807a4 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -22,6 +22,7 @@ use crate::{ NO_EXTRINSIC_INDEX, BlockNumber, build_changes_trie, State as ChangesTrieState, }, + stats::StateMachineStats, }; #[cfg(test)] @@ -57,6 +58,8 @@ pub struct OverlayedChanges { pub(crate) committed: OverlayedChangeSet, /// True if extrinsics stats must be collected. pub(crate) collect_extrinsics: bool, + /// Collect statistic on this execution. 
+ pub(crate) stats: StateMachineStats, } /// The storage value, used inside OverlayedChanges. @@ -206,7 +209,11 @@ impl OverlayedChanges { pub fn storage(&self, key: &[u8]) -> Option> { self.prospective.top.get(key) .or_else(|| self.committed.top.get(key)) - .map(|x| x.value.as_ref().map(AsRef::as_ref)) + .map(|x| { + let size_read = x.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_read_modified(size_read); + x.value.as_ref().map(AsRef::as_ref) + }) } /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred @@ -215,12 +222,16 @@ impl OverlayedChanges { pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { if let Some(map) = self.prospective.children.get(storage_key) { if let Some(val) = map.0.get(key) { + let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_read_modified(size_read); return Some(val.value.as_ref().map(AsRef::as_ref)); } } if let Some(map) = self.committed.children.get(storage_key) { if let Some(val) = map.0.get(key) { + let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_read_modified(size_read); return Some(val.value.as_ref().map(AsRef::as_ref)); } } @@ -232,6 +243,8 @@ impl OverlayedChanges { /// /// `None` can be used to delete a value specified by the given key. pub(crate) fn set_storage(&mut self, key: StorageKey, val: Option) { + let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_write_overlay(size_write); let extrinsic_index = self.extrinsic_index(); let entry = self.prospective.top.entry(key).or_default(); entry.value = val; @@ -252,6 +265,8 @@ impl OverlayedChanges { key: StorageKey, val: Option, ) { + let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_write_overlay(size_write); let extrinsic_index = self.extrinsic_index(); let map_entry = self.prospective.children.entry(storage_key) .or_insert_with(|| (Default::default(), child_info.to_owned())); diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 119fb59a7234e919765887bd859b04587cbb3292..747872af831ac44fbebaccf0c6c571b735b4f73a 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -283,6 +283,12 @@ impl<'a, S, H> Backend for ProvingBackend<'a, S, H> { self.0.child_storage_root(storage_key, child_info, delta) } + + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } + + fn usage_info(&self) -> crate::stats::UsageInfo { + self.0.usage_info() + } } /// Create proof check backend. diff --git a/primitives/state-machine/src/stats.rs b/primitives/state-machine/src/stats.rs index aa69b5be9d652846f2c78ae1638bbe45aa08b877..8fa03344ad330af985fd52622f061c7b6644597f 100644 --- a/primitives/state-machine/src/stats.rs +++ b/primitives/state-machine/src/stats.rs @@ -17,6 +17,7 @@ //! Usage statistics for state db use std::time::{Instant, Duration}; +use std::cell::RefCell; /// Measured count of operations and total bytes. #[derive(Clone, Debug, Default)] @@ -32,10 +33,19 @@ pub struct UsageUnit { pub struct UsageInfo { /// Read statistics (total). pub reads: UsageUnit, - /// Write statistics. + /// Write statistics (total). pub writes: UsageUnit, + /// Write trie nodes statistics. + pub nodes_writes: UsageUnit, + /// Write into cached state machine + /// change overlay. + pub overlay_writes: UsageUnit, + /// Removed trie nodes statistics. 
+ pub removed_nodes: UsageUnit, /// Cache read statistics. pub cache_reads: UsageUnit, + /// Modified value read statistics. + pub modified_reads: UsageUnit, /// Memory used. pub memory: usize, @@ -45,6 +55,35 @@ pub struct UsageInfo { pub span: Duration, } +/// Accumulated usage statistics specific to the state machine +/// crate. +#[derive(Debug, Default, Clone)] +pub struct StateMachineStats { + /// Number of read queries from the runtime + /// that hit a modified value (in state + /// machine overlay). + pub reads_modified: RefCell, + /// Size in bytes of read queries that + /// hit a modified value. + pub bytes_read_modified: RefCell, + /// Number of times a write operation + /// occurs in the state machine overlay. + pub writes_overlay: RefCell, + /// Size in bytes of the writes overlay + /// operations. + pub bytes_writes_overlay: RefCell, +} + +impl StateMachineStats { + /// Accumulates some registered stats. + pub fn add(&self, other: &StateMachineStats) { + *self.reads_modified.borrow_mut() += *other.reads_modified.borrow(); + *self.bytes_read_modified.borrow_mut() += *other.bytes_read_modified.borrow(); + *self.writes_overlay.borrow_mut() += *other.writes_overlay.borrow(); + *self.bytes_writes_overlay.borrow_mut() += *other.bytes_writes_overlay.borrow(); + } +} + impl UsageInfo { /// Empty statistics. /// @@ -53,10 +92,34 @@ impl UsageInfo { Self { reads: UsageUnit::default(), writes: UsageUnit::default(), + overlay_writes: UsageUnit::default(), + nodes_writes: UsageUnit::default(), + removed_nodes: UsageUnit::default(), cache_reads: UsageUnit::default(), + modified_reads: UsageUnit::default(), memory: 0, started: Instant::now(), span: Default::default(), } } + /// Add collected state machine stats to this state. + pub fn include_state_machine_states(&mut self, count: &StateMachineStats) { + self.modified_reads.ops += *count.reads_modified.borrow(); + self.modified_reads.bytes += *count.bytes_read_modified.borrow(); + self.overlay_writes.ops += *count.writes_overlay.borrow(); + self.overlay_writes.bytes += *count.bytes_writes_overlay.borrow(); + } +} + +impl StateMachineStats { + /// Tally one read modified operation, of some length. + pub fn tally_read_modified(&self, data_bytes: u64) { + *self.reads_modified.borrow_mut() += 1; + *self.bytes_read_modified.borrow_mut() += data_bytes; + } + /// Tally one write overlay operation, of some length.
+ pub fn tally_write_overlay(&self, data_bytes: u64) { + *self.writes_overlay.borrow_mut() += 1; + *self.bytes_writes_overlay.borrow_mut() += data_bytes; + } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index e64dd590e51f711f2e59f0224f66e980121ee35f..f88e306a2fc3dad8ee1b7cfa6e45ead0f7e1a8b9 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -240,6 +240,12 @@ impl, H: Hasher> Backend for TrieBackend where fn as_trie_backend(&mut self) -> Option<&TrieBackend> { Some(self) } + + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } + + fn usage_info(&self) -> crate::UsageInfo { + crate::UsageInfo::empty() + } } #[cfg(test)] diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index 38a0c713c0563d88758ce4ace6e9eb790fc0369e..58ff78f2bbb510adc98a7de88415503cf71b0061 100644 --- a/primitives/std/Cargo.toml +++ b/primitives/std/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-std" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -13,3 +13,6 @@ documentation = "https://docs.rs/sp-std" [features] default = ["std"] std = [] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index 7e434cdd898212aed62582e9e8dbcef92a16bd45..e4e842848dd0fe002e613099352b9f731bfeeeba 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-storage" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" description = "Storage related primitives" @@ -10,11 +10,14 @@ repository = "https://github.com/paritytech/substrate/" documentation = "https://docs.rs/sp-storage/" [dependencies] -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } -sp-debug-derive = { version = "2.0.0-alpha.2", path = "../debug-derive" } +sp-debug-derive = { version = "2.0.0-alpha.5", path = "../debug-derive" } [features] default = [ "std" ] std = [ "sp-std/std", "serde", "impl-serde" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 8788876769c1b08d346fef7afbf359447c3e440b..b8cb583835d89b4c25c0fa1583c36c8f66489729 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -9,12 +9,12 @@ repository = "https://github.com/paritytech/substrate/" publish = false [dependencies] -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../core" } serde = { version = "1.0.101", optional = true, 
features = ["derive"] } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } -parity-util-mem = { version = "0.5.2", default-features = false, features = ["primitive-types"] } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } +parity-util-mem = { version = "0.6.0", default-features = false, features = ["primitive-types"] } [features] default = [ @@ -24,3 +24,6 @@ std = [ "sp-application-crypto/std", "serde", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 1fc40a113f90378078398c633f508a9871c843dc..4a0851ccb19a843da2537b15d31843b0100e8cb3 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-timestamp" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,11 +9,11 @@ repository = "https://github.com/paritytech/substrate/" description = "Substrate core types and inherents for timestamps." [dependencies] -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../inherents" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../api" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../inherents" } impl-trait-for-tuples = "0.1.3" wasm-timer = "0.2" @@ -26,3 +26,6 @@ std = [ "codec/std", "sp-inherents/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index 979b98b4952c92eaf3ad9b7f73d47807ada1d547..f1fd06a44a59f95e62d701f9b46fc4e4d0b9ce7e 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -77,6 +77,7 @@ impl TimestampInherentData for InherentData { } } +/// Provide duration since unix epoch in millisecond for timestamp inherent. 
#[cfg(feature = "std")] pub struct InherentDataProvider; diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 69ec59cf5ef92015bde57140311f6d71f716c6fe..6e30fb4ddc16eb167fa786d27f110e53ab5857ff 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-transaction-pool" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -11,13 +11,14 @@ documentation = "https://docs.rs/sp-transaction-pool" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", optional = true } +codec = { package = "parity-scale-codec", version = "1.3.0", optional = true } derive_more = { version = "0.99.2", optional = true } futures = { version = "0.3.1", optional = true } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", features = ["derive"], optional = true} -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } +sp-utils = { version = "2.0.0-alpha.5", default-features = false, path = "../utils" } [features] default = [ "std" ] @@ -30,3 +31,6 @@ std = [ "sp-api/std", "sp-runtime/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/transaction-pool/src/lib.rs b/primitives/transaction-pool/src/lib.rs index 9abbcfbdf28aaf4a066f8e28fc4fb174d129939c..e4498bd0248801dbe7b2f57cb5917003d5932c62 100644 --- a/primitives/transaction-pool/src/lib.rs +++ b/primitives/transaction-pool/src/lib.rs @@ -29,5 +29,5 @@ mod pool; pub use pool::*; pub use sp_runtime::transaction_validity::{ - TransactionLongevity, TransactionPriority, TransactionTag, + TransactionLongevity, TransactionPriority, TransactionTag, TransactionSource, }; diff --git a/primitives/transaction-pool/src/pool.rs b/primitives/transaction-pool/src/pool.rs index 89f327a523b5e5a8ad1c7fee3e9ef0394fb812e2..ddc3fffa1530b4c40645b1b7a815284efb866bd9 100644 --- a/primitives/transaction-pool/src/pool.rs +++ b/primitives/transaction-pool/src/pool.rs @@ -22,16 +22,14 @@ use std::{ sync::Arc, pin::Pin, }; -use futures::{ - Future, Stream, - channel::mpsc, -}; +use futures::{Future, Stream,}; use serde::{Deserialize, Serialize}; +use sp_utils::mpsc; use sp_runtime::{ generic::BlockId, - traits::{Block as BlockT, Member}, + traits::{Block as BlockT, Member, NumberFor}, transaction_validity::{ - TransactionLongevity, TransactionPriority, TransactionTag, + TransactionLongevity, TransactionPriority, TransactionTag, TransactionSource, }, }; @@ -132,7 +130,7 @@ pub enum TransactionStatus { pub type TransactionStatusStream = dyn Stream> + Send + Unpin; /// The import notification event stream. -pub type ImportNotificationStream = mpsc::UnboundedReceiver; +pub type ImportNotificationStream = mpsc::TracingUnboundedReceiver; /// Transaction hash type for a pool. pub type TxHash

<P> = <P as TransactionPool>

::Hash; @@ -192,6 +190,7 @@ pub trait TransactionPool: Send + Sync { fn submit_at( &self, at: &BlockId, + source: TransactionSource, xts: Vec>, ) -> PoolFuture, Self::Error>>, Self::Error>; @@ -199,6 +198,7 @@ pub trait TransactionPool: Send + Sync { fn submit_one( &self, at: &BlockId, + source: TransactionSource, xt: TransactionFor, ) -> PoolFuture, Self::Error>; @@ -206,12 +206,20 @@ pub trait TransactionPool: Send + Sync { fn submit_and_watch( &self, at: &BlockId, + source: TransactionSource, xt: TransactionFor, ) -> PoolFuture>, Self::Error>; // *** Block production / Networking - /// Get an iterator for ready transactions ordered by priority - fn ready(&self) -> Box>>; + /// Get an iterator for ready transactions ordered by priority. + /// + /// Guarantees to return only when transaction pool got updated at `at` block. + /// Guarantees to return immediately when `None` is passed. + fn ready_at(&self, at: NumberFor) + -> Pin> + Send>> + Send>>; + + /// Get an iterator for ready transactions ordered by priority. + fn ready(&self) -> Box> + Send>; // *** Block production /// Remove transactions identified by given hashes (and dependent transactions) from the pool. @@ -292,7 +300,9 @@ impl OffchainSubmitTransaction for TPool { extrinsic ); - let result = futures::executor::block_on(self.submit_one(&at, extrinsic)); + let result = futures::executor::block_on(self.submit_one( + &at, TransactionSource::Local, extrinsic, + )); result.map(|_| ()) .map_err(|e| log::warn!( diff --git a/primitives/transaction-pool/src/runtime_api.rs b/primitives/transaction-pool/src/runtime_api.rs index e30ce7c8289da7b00e33dac71e592c496a3dafb7..fa2e51653b284fcde5ca6362017c4ec813c48a0a 100644 --- a/primitives/transaction-pool/src/runtime_api.rs +++ b/primitives/transaction-pool/src/runtime_api.rs @@ -16,13 +16,27 @@ //! Tagged Transaction Queue Runtime API. -use sp_runtime::transaction_validity::TransactionValidity; +use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource}; use sp_runtime::traits::Block as BlockT; sp_api::decl_runtime_apis! { /// The `TaggedTransactionQueue` api trait for interfering with the transaction queue. + #[api_version(2)] pub trait TaggedTransactionQueue { - /// Validate the given transaction. + /// Validate the transaction. + #[changed_in(2)] fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity; + + /// Validate the transaction. + /// + /// This method is invoked by the transaction pool to learn details about given transaction. + /// The implementation should make sure to verify the correctness of the transaction + /// against current state. + /// Note that this call may be performed by the pool multiple times and transactions + /// might be verified in any possible order. 
+ fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity; } } diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index ff3c04b54193f0778bb5d503e8b3544da5054ebc..3876f4652624be83193661e83ea74f85c108920a 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-trie" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] description = "Patricia trie stuff using a parity-scale-codec node format" repository = "https://github.com/paritytech/substrate/" @@ -14,20 +14,20 @@ name = "bench" harness = false [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } trie-db = { version = "0.20.0", default-features = false } trie-root = { version = "0.16.0", default-features = false } -memory-db = { version = "0.19.0", default-features = false } -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } +memory-db = { version = "0.20.0", default-features = false } +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../core" } [dev-dependencies] -trie-bench = "0.20.0" +trie-bench = "0.21.0" trie-standardmap = "0.15.2" criterion = "0.2.11" hex-literal = "0.2.1" -sp-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../runtime" } [features] default = ["std"] @@ -40,3 +40,6 @@ std = [ "trie-root/std", "sp-core/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/utils/Cargo.toml b/primitives/utils/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..97e5ce1d9b618c2aa250ffd7054f4dbbc6165f14 --- /dev/null +++ b/primitives/utils/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "sp-utils" +version = "2.0.0-alpha.5" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "I/O for Substrate runtimes" + +[dependencies] +futures = "0.3.4" +futures-core = "0.3.4" +lazy_static = "1.4.0" +prometheus = "0.8.0" + +[features] +default = ["metered"] +metered = [] diff --git a/primitives/utils/src/lib.rs b/primitives/utils/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..385a9b6689896102d5e43fc0eda53bb170f0443c --- /dev/null +++ b/primitives/utils/src/lib.rs @@ -0,0 +1,20 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
Utilities Primitives for Substrate + +pub mod metrics; +pub mod mpsc; \ No newline at end of file diff --git a/primitives/utils/src/metrics.rs b/primitives/utils/src/metrics.rs new file mode 100644 index 0000000000000000000000000000000000000000..160714fdca1cd8a6bc2864a38fead5bf7fa4cf56 --- /dev/null +++ b/primitives/utils/src/metrics.rs @@ -0,0 +1,58 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Metering primitives and globals + +use lazy_static::lazy_static; +use prometheus::{ + Registry, Error as PrometheusError, + core::{ AtomicU64, GenericGauge, GenericCounter }, +}; + +#[cfg(features = "metered")] +use prometheus::{core::GenericGaugeVec, Opts}; + + +lazy_static! { + pub static ref TOKIO_THREADS_TOTAL: GenericCounter = GenericCounter::new( + "tokio_threads_total", "Total number of threads created" + ).expect("Creating of statics doesn't fail. qed"); + + pub static ref TOKIO_THREADS_ALIVE: GenericGauge = GenericGauge::new( + "tokio_threads_alive", "Number of threads alive right now" + ).expect("Creating of statics doesn't fail. qed"); +} + +#[cfg(features = "metered")] +lazy_static! { + pub static ref UNBOUNDED_CHANNELS_COUNTER : GenericGaugeVec = GenericGaugeVec::new( + Opts::new("unbounded_channel_len", "Items in each mpsc::unbounded instance"), + &["entity", "action"] // 'name of channel, send|received|dropped + ).expect("Creating of statics doesn't fail. qed"); + +} + + +/// Register the statics to report to registry +pub fn register_globals(registry: &Registry) -> Result<(), PrometheusError> { + registry.register(Box::new(TOKIO_THREADS_ALIVE.clone()))?; + registry.register(Box::new(TOKIO_THREADS_TOTAL.clone()))?; + + #[cfg(features = "metered")] + registry.register(Box::new(UNBOUNDED_CHANNELS_COUNTER.clone()))?; + + Ok(()) +} diff --git a/primitives/utils/src/mpsc.rs b/primitives/utils/src/mpsc.rs new file mode 100644 index 0000000000000000000000000000000000000000..42fb287c18d892ea204b60131dbea20c9f7ee43c --- /dev/null +++ b/primitives/utils/src/mpsc.rs @@ -0,0 +1,232 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! 
Features to meter unbounded channels + +#[cfg(not(features = "metered"))] +mod inner { + // just aliased, non performance implications + use futures::channel::mpsc::{self, UnboundedReceiver, UnboundedSender}; + pub type TracingUnboundedSender = UnboundedSender; + pub type TracingUnboundedReceiver = UnboundedReceiver; + + /// Alias `mpsc::unbounded` + pub fn tracing_unbounded(_key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { + mpsc::unbounded() + } +} + + +#[cfg(features = "metered")] +mod inner { + //tracing implementation + use futures::channel::mpsc::{self, + UnboundedReceiver, UnboundedSender, + TryRecvError, TrySendError, SendError + }; + use futures::{sink::Sink, task::{Poll, Context}, stream::Stream}; + use std::pin::Pin; + use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; + + /// Wrapper Type around `UnboundedSender` that increases the global + /// measure when a message is added + #[derive(Debug, Clone)] + pub struct TracingUnboundedSender(&'static str, UnboundedSender); + + /// Wrapper Type around `UnboundedReceiver` that decreases the global + /// measure when a message is polled + #[derive(Debug)] + pub struct TracingUnboundedReceiver(&'static str, UnboundedReceiver); + + /// Wrapper around `mpsc::unbounded` that tracks the in- and outflow via + /// `UNBOUNDED_CHANNELS_COUNTER` + pub fn tracing_unbounded(key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { + let (s, r) = mpsc::unbounded(); + (TracingUnboundedSender(key.clone(), s), TracingUnboundedReceiver(key,r)) + } + + impl TracingUnboundedSender { + /// Proxy function to mpsc::UnboundedSender + pub fn poll_ready(&self, ctx: &mut Context) -> Poll> { + self.1.poll_ready(ctx) + } + + /// Proxy function to mpsc::UnboundedSender + pub fn is_closed(&self) -> bool { + self.1.is_closed() + } + + /// Proxy function to mpsc::UnboundedSender + pub fn close_channel(&self) { + self.1.close_channel() + } + + /// Proxy function to mpsc::UnboundedSender + pub fn disconnect(&mut self) { + self.1.disconnect() + } + + /// Proxy function to mpsc::UnboundedSender + pub fn start_send(&mut self, msg: T) -> Result<(), SendError> { + self.1.start_send(msg) + } + + /// Proxy function to mpsc::UnboundedSender + pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { + self.1.unbounded_send(msg).map(|s|{ + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"send"]).incr(); + s + }) + } + + /// Proxy function to mpsc::UnboundedSender + pub fn same_receiver(&self, other: &UnboundedSender) -> bool { + self.1.same_receiver(other) + } + } + + impl TracingUnboundedReceiver { + + fn consume(&mut self) { + // consume all items, make sure to reflect the updated count + let mut count = 0; + while let Ok(Some(..)) = self.try_next() { + count += 1; + } + + // and discount the messages + if count > 0 { + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"dropped"]).incr_by(count); + } + + } + + /// Proxy function to mpsc::UnboundedReceiver + /// that consumes all messages first and updates the counter + pub fn close(&mut self) { + self.consume(); + self.1.close() + } + + /// Proxy function to mpsc::UnboundedReceiver + /// that discounts the messages taken out + pub fn try_next(&mut self) -> Result, TryRecvError> { + self.1.try_next().map(|s| { + if s.is_some() { + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"received"]).incr(); + } + s + }) + } + } + + impl Drop for TracingUnboundedReceiver { + fn drop(&mut self) { + self.consume(); + } + } + + impl Unpin for TracingUnboundedReceiver {} + 
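To keep the metering idea visible without the Prometheus types, here is a cut-down sketch of the same send/receive bookkeeping using plain atomics (assuming only `futures = "0.3"`); the real wrappers above record into `UNBOUNDED_CHANNELS_COUNTER` instead, but the flow is the same: count on `unbounded_send`, discount on successful receive.

```rust
use std::sync::atomic::{AtomicU64, Ordering};
use futures::channel::mpsc::{unbounded, TrySendError, UnboundedReceiver, UnboundedSender};

static SENT: AtomicU64 = AtomicU64::new(0);
static RECEIVED: AtomicU64 = AtomicU64::new(0);

/// Sender half: bumps the `SENT` counter for every queued message.
struct CountingSender<T>(&'static str, UnboundedSender<T>);

/// Receiver half: bumps the `RECEIVED` counter for every message taken out.
struct CountingReceiver<T>(&'static str, UnboundedReceiver<T>);

fn counting_unbounded<T>(key: &'static str) -> (CountingSender<T>, CountingReceiver<T>) {
    let (tx, rx) = unbounded();
    (CountingSender(key, tx), CountingReceiver(key, rx))
}

impl<T> CountingSender<T> {
    fn unbounded_send(&self, msg: T) -> Result<(), TrySendError<T>> {
        self.1.unbounded_send(msg).map(|()| {
            SENT.fetch_add(1, Ordering::Relaxed);
        })
    }
}

impl<T> CountingReceiver<T> {
    fn try_next(&mut self) -> Option<T> {
        match self.1.try_next() {
            Ok(Some(msg)) => {
                RECEIVED.fetch_add(1, Ordering::Relaxed);
                Some(msg)
            }
            _ => None,
        }
    }
}

fn main() {
    let (tx, mut rx) = counting_unbounded::<u32>("example-channel");
    tx.unbounded_send(1).unwrap();
    tx.unbounded_send(2).unwrap();
    while rx.try_next().is_some() {}
    println!(
        "`{}`/`{}`: sent={} received={}",
        tx.0,
        rx.0,
        SENT.load(Ordering::Relaxed),
        RECEIVED.load(Ordering::Relaxed),
    );
}
```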
+ impl Stream for TracingUnboundedReceiver { + type Item = T; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + let s = self.get_mut(); + match Pin::new(&mut s.1).poll_next(cx) { + Poll::Ready(msg) => { + if msg.is_some() { + UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, "received"]).incr(); + } + Poll::Ready(msg) + } + Poll::Pending => { + Poll::Pending + } + } + } + } + + + impl Sink for TracingUnboundedSender { + type Error = SendError; + + fn poll_ready( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + TracingUnboundedSender::poll_ready(&*self, cx) + } + + fn start_send( + mut self: Pin<&mut Self>, + msg: T, + ) -> Result<(), Self::Error> { + TracingUnboundedSender::start_send(&mut *self, msg) + } + + fn poll_flush( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + self.disconnect(); + Poll::Ready(Ok(())) + } + } + + impl Sink for &TracingUnboundedSender { + type Error = SendError; + + fn poll_ready( + self: Pin<&mut Self>, + cx: &mut Context<'_>, + ) -> Poll> { + TracingUnboundedSender::poll_ready(*self, cx) + } + + fn start_send(self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { + self.unbounded_send(msg) + .map_err(TrySendError::into_send_error) + } + + fn poll_flush( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close( + self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + self.close_channel(); + Poll::Ready(Ok(())) + } + } +} + +pub use inner::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index edaf940465bd679cf94e9c319bc681d3ee680516..726d064642fb355a9d037ac840c44a5708db2850 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-version" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -13,9 +13,9 @@ documentation = "https://docs.rs/sp-version" [dependencies] impl-serde = { version = "0.2.3", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../runtime" } [features] default = ["std"] @@ -26,3 +26,6 @@ std = [ "sp-std/std", "sp-runtime/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index bbe53df258271b74c4fd36a035368c915cc28768..4a35d5b5180ff07529762e003370e7d2f21eef2b 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "sp-wasm-interface" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -12,9 +12,12 @@ documentation = 
"https://docs.rs/sp-wasm-interface" [dependencies] wasmi = { version = "0.6.2", optional = true } impl-trait-for-tuples = "0.1.2" -sp-std = { version = "2.0.0-alpha.2", path = "../std", default-features = false } -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.5", path = "../std", default-features = false } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] std = [ "wasmi", "sp-std/std", "codec/std" ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index d3477b94a6f95e608b6df5c75ca03845af386825..b8c9f9bd606059b73adb080f68c78dda3e8026a5 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -1,8 +1,11 @@ [package] name = "substrate-test-utils" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" homepage = "https://substrate.dev" repository = "https://github.com/paritytech/substrate/" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 77a614e5fecca7ed6686da5e846dfe975ab4e64b..ec87e7cd168f05585f9f70a25793fc4840af5017 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -9,16 +9,19 @@ repository = "https://github.com/paritytech/substrate/" publish = false [dependencies] -sc-client-api = { version = "2.0.0-alpha.2", path = "../../client/api" } -sc-client = { version = "0.8.0-alpha.2", path = "../../client/" } -sc-client-db = { version = "0.8.0-alpha.2", features = ["test-helpers"], path = "../../client/db" } -sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } -sc-executor = { version = "0.8.0-alpha.2", path = "../../client/executor" } -futures = "0.3.1" +sc-client-api = { version = "2.0.0-alpha.5", path = "../../client/api" } +sc-client = { version = "0.8.0-alpha.5", path = "../../client/" } +sc-client-db = { version = "0.8.0-alpha.5", features = ["test-helpers"], path = "../../client/db" } +sp-consensus = { version = "0.8.0-alpha.5", path = "../../primitives/consensus/common" } +sc-executor = { version = "0.8.0-alpha.5", path = "../../client/executor" } +futures = "0.3.4" hash-db = "0.15.2" -sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } -codec = { package = "parity-scale-codec", version = "1.2.0" } -sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } +sp-keyring = { version = "2.0.0-alpha.5", path = "../../primitives/keyring" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +sp-core = { version = "2.0.0-alpha.5", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../primitives/runtime" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../primitives/blockchain" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index 
104a800d2114bf95388dded53f1a0b282d07a9d8..d04e85fd10c2330161dc3636deb66cc4ba26b710 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -23,7 +23,7 @@ pub mod client_ext; pub use sc_client::{blockchain, self}; pub use sc_client_api::{ execution_extensions::{ExecutionStrategies, ExecutionExtensions}, - ForkBlocks, BadBlocks, + ForkBlocks, BadBlocks, CloneableSpawn, }; pub use sc_client_db::{Backend, self}; pub use sp_consensus; @@ -33,7 +33,7 @@ pub use sp_keyring::{ ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, }; -pub use sp_core::traits::BareCryptoStorePtr; +pub use sp_core::{traits::BareCryptoStorePtr, tasks::executor as tasks_executor}; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; pub use self::client_ext::{ClientExt, ClientBlockImportExt}; @@ -246,7 +246,7 @@ impl TestClientBuilder< let executor = executor.into().unwrap_or_else(|| NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) ); - let executor = LocalCallExecutor::new(self.backend.clone(), executor); + let executor = LocalCallExecutor::new(self.backend.clone(), executor, tasks_executor()); self.build_with_executor(executor) } diff --git a/test-utils/runtime/Cargo.toml b/test-utils/runtime/Cargo.toml index 629656f10fbe27c5b6ad2370e860690c79ea40e8..be22747ea69166d91f83102e79171c18c8e5441a 100644 --- a/test-utils/runtime/Cargo.toml +++ b/test-utils/runtime/Cargo.toml @@ -10,43 +10,43 @@ repository = "https://github.com/paritytech/substrate/" publish = false [dependencies] -sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/consensus/aura" } -sp-consensus-babe = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/consensus/babe" } -sp-block-builder = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/block-builder" } +sp-application-crypto = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0-alpha.5", default-features = false, path = "../../primitives/consensus/aura" } +sp-consensus-babe = { version = "0.8.0-alpha.5", default-features = false, path = "../../primitives/consensus/babe" } +sp-block-builder = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/block-builder" } cfg-if = "0.1.10" -codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -frame-executive = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/executive" } -sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0-alpha.2", optional = true, path = "../../primitives/keyring" } +codec = { package = "parity-scale-codec", version = "1.3.0", default-features = false, features = ["derive"] } +frame-executive = { version = "2.0.0-alpha.5", default-features = false, path = "../../frame/executive" } +sp-inherents = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0-alpha.5", optional = true, path = "../../primitives/keyring" } log = { version = "0.4.8", optional = true } -memory-db = { version = "0.19.0", default-features = false } -sp-offchain = { path = "../../primitives/offchain", 
default-features = false, version = "2.0.0-alpha.2"} -sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0-alpha.2"} -sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/support" } -sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/version" } +memory-db = { version = "0.20.0", default-features = false } +sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-alpha.5"} +sp-core = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/std" } +sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0-alpha.5"} +sp-io = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-alpha.5", default-features = false, path = "../../frame/support" } +sp-version = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/version" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-session = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/session" } -sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } -pallet-babe = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/babe" } -frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/system" } -frame-system-rpc-runtime-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/system/rpc/runtime-api" } -pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/timestamp" } -sc-client = { version = "0.8.0-alpha.2", optional = true, path = "../../client" } -sp-trie = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/trie" } -sp-transaction-pool = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/transaction-pool" } +sp-session = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/session" } +sp-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/runtime" } +pallet-babe = { version = "2.0.0-alpha.5", default-features = false, path = "../../frame/babe" } +frame-system = { version = "2.0.0-alpha.5", default-features = false, path = "../../frame/system" } +frame-system-rpc-runtime-api = { version = "2.0.0-alpha.5", default-features = false, path = "../../frame/system/rpc/runtime-api" } +pallet-timestamp = { version = "2.0.0-alpha.5", default-features = false, path = "../../frame/timestamp" } +sc-client = { version = "0.8.0-alpha.5", optional = true, path = "../../client" } +sp-trie = { version = "2.0.0-alpha.5", default-features = false, path = "../../primitives/trie" } +sp-transaction-pool = { version = 
"2.0.0-alpha.5", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.20.0", default-features = false } -parity-util-mem = { version = "0.5.2", default-features = false, features = ["primitive-types"] } +parity-util-mem = { version = "0.6.0", default-features = false, features = ["primitive-types"] } [dev-dependencies] -sc-block-builder = { version = "0.8.0-alpha.2", path = "../../client/block-builder" } -sc-executor = { version = "0.8.0-alpha.2", path = "../../client/executor" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../../client/block-builder" } +sc-executor = { version = "0.8.0-alpha.5", path = "../../client/executor" } substrate-test-runtime-client = { version = "2.0.0-dev", path = "./client" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../primitives/state-machine" } [build-dependencies] wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } @@ -87,3 +87,6 @@ std = [ "sp-transaction-pool/std", "trie-db/std", ] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 5f16e77860ba68083ce8c4e1ff6088cb78d156fc..4be45fe46659a2583df6c02b2c67a32f8bf64a18 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -9,14 +9,17 @@ repository = "https://github.com/paritytech/substrate/" publish = false [dependencies] -sc-block-builder = { version = "0.8.0-alpha.2", path = "../../../client/block-builder" } +sc-block-builder = { version = "0.8.0-alpha.5", path = "../../../client/block-builder" } substrate-test-client = { version = "2.0.0-dev", path = "../../client" } -sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } substrate-test-runtime = { version = "2.0.0-dev", path = "../../runtime" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -codec = { package = "parity-scale-codec", version = "1.2.0" } -sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } -sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } -futures = "0.3.1" +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.5", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +sc-client-api = { version = "2.0.0-alpha.5", path = "../../../client/api" } +sc-client = { version = "0.8.0-alpha.5", path = "../../../client/" } +futures = "0.3.4" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index ccaf644b82618758812dcf558810afe01492aa67..f0a405e67e469012f031bb0e3ab676a39e7c5c5b 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -349,7 +349,7 @@ pub fn new_light() -> ( let blockchain = Arc::new(sc_client::light::blockchain::Blockchain::new(storage)); let backend = 
Arc::new(LightBackend::new(blockchain.clone())); let executor = new_native_executor(); - let local_call_executor = sc_client::LocalCallExecutor::new(backend.clone(), executor); + let local_call_executor = sc_client::LocalCallExecutor::new(backend.clone(), executor, sp_core::tasks::executor()); let call_executor = LightExecutor::new( backend.clone(), local_call_executor, diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 59955cce48693ea4a478378a404d116e32e2a791..c0aea9a2ab599ddc8a3366f10c7d6389aa83b18d 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -36,6 +36,7 @@ use sp_runtime::{ ApplyExtrinsicResult, create_runtime_str, Perbill, impl_opaque_keys, transaction_validity::{ TransactionValidity, ValidTransaction, TransactionValidityError, InvalidTransaction, + TransactionSource, }, traits::{ BlindCheckable, BlakeTwo256, Block as BlockT, Extrinsic as ExtrinsicT, @@ -180,6 +181,16 @@ impl ExtrinsicT for Extrinsic { } } +impl sp_runtime::traits::Dispatchable for Extrinsic { + type Origin = (); + type Trait = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + panic!("This implemention should not be used for actual dispatch."); + } +} + impl Extrinsic { pub fn transfer(&self) -> &Transfer { match self { @@ -492,7 +503,10 @@ cfg_if! { } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction(utx: ::Extrinsic) -> TransactionValidity { + fn validate_transaction( + _source: TransactionSource, + utx: ::Extrinsic, + ) -> TransactionValidity { if let Extrinsic::IncludeData(data) = utx { return Ok(ValidTransaction { priority: data.len() as u64, @@ -679,7 +693,10 @@ cfg_if! { } impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction(utx: ::Extrinsic) -> TransactionValidity { + fn validate_transaction( + _source: TransactionSource, + utx: ::Extrinsic, + ) -> TransactionValidity { if let Extrinsic::IncludeData(data) = utx { return Ok(ValidTransaction{ priority: data.len() as u64, diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index d8790d5317af05732a3a34a6b954dd590c57fee6..c35850ae950e928ae46bb882457f1bf3ef3baa83 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -46,7 +46,7 @@ decl_module! { decl_storage! { trait Store for Module as TestRuntime { - ExtrinsicData: map hasher(blake2_256) u32 => Vec; + ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec; // The current block number being processed. Set by `execute_block`. 
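The `ExtrinsicData` map a few lines above switches from the opaque `blake2_256` hasher to `blake2_128_concat`, which appends the SCALE-encoded key after a 16-byte hash, so the key can be read back from the storage key and the map iterated. A sketch of that key layout, assuming the `sp_core::hashing::blake2_128` helper and `parity-scale-codec` (`codec`) already used in this workspace:

```rust
use codec::{Decode, Encode}; // `codec` = parity-scale-codec, as elsewhere in this diff
use sp_core::hashing::blake2_128;

/// Build the storage key material for one map entry, `blake2_128_concat` style:
/// a 16-byte Blake2 hash of the encoded key, followed by the encoded key itself.
fn blake2_128_concat_key<K: Encode>(key: &K) -> Vec<u8> {
    let encoded = key.encode();
    let mut out = blake2_128(&encoded).to_vec();
    out.extend_from_slice(&encoded);
    out
}

/// Recover the original key again by skipping the 16-byte hash prefix.
/// With the opaque `blake2_256` hasher this is impossible: only the hash is stored.
fn decode_key<K: Decode>(key_material: &[u8]) -> Option<K> {
    if key_material.len() < 16 {
        return None;
    }
    K::decode(&mut &key_material[16..]).ok()
}

fn main() {
    let index: u32 = 7;
    let key_material = blake2_128_concat_key(&index);
    assert_eq!(decode_key::<u32>(&key_material), Some(7));
    println!("{} bytes of key material", key_material.len());
}
```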
Number get(fn number): Option; ParentHash get(fn parent_hash): Hash; diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index 3e22da468f1e39d914243d8aa3cdf44fdd98a658..52e2020dc8dd18dfc82de735e66b98b469bfefb9 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -11,10 +11,13 @@ publish = false [dependencies] substrate-test-runtime-client = { version = "2.0.0-dev", path = "../client" } parking_lot = "0.10.0" -codec = { package = "parity-scale-codec", version = "1.2.0" } -sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } -sc-transaction-graph = { version = "2.0.0-alpha.2", path = "../../../client/transaction-pool/graph" } +codec = { package = "parity-scale-codec", version = "1.3.0" } +sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../primitives/transaction-pool" } +sc-transaction-graph = { version = "2.0.0-alpha.5", path = "../../../client/transaction-pool/graph" } futures = { version = "0.3.1", features = ["compat"] } derive_more = "0.99.2" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index 8cd4e58954b73c931f89b7178fee92896ce21c5e..432c9e520d1b178c0dc2567a35be82873d9ee687 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -25,6 +25,7 @@ use sp_runtime::{ traits::{BlakeTwo256, Hash as HashT}, transaction_validity::{ TransactionValidity, ValidTransaction, TransactionValidityError, InvalidTransaction, + TransactionSource, }, }; use std::collections::{HashSet, HashMap}; @@ -180,6 +181,7 @@ impl sc_transaction_graph::ChainApi for TestApi { fn validate_transaction( &self, _at: &BlockId, + _source: TransactionSource, uxt: sc_transaction_graph::ExtrinsicFor, ) -> Self::ValidationFuture { self.validation_requests.write().push(uxt.clone()); diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 205f42d61fb14fc9dc119584901e7ea17355fe0b..5f6b18e00148c5e34e694b1d86113bb873b88461 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-browser-utils" -version = "0.8.0-alpha.3" +version = "0.8.0-alpha.5" authors = ["Parity Technologies "] description = "Utilities for creating a browser light-client." 
edition = "2018" @@ -12,17 +12,17 @@ repository = "https://github.com/paritytech/substrate/" futures = "0.3" futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p-wasm-ext = { version = "0.16.2", features = ["websocket"] } +libp2p-wasm-ext = { version = "0.17.0", features = ["websocket"] } console_error_panic_hook = "0.1.6" console_log = "0.1.2" js-sys = "0.3.34" wasm-bindgen = "0.2.57" wasm-bindgen-futures = "0.4.7" -kvdb-web = "0.4" -sc-informant = { version = "0.8.0-alpha.2", path = "../../client/informant" } -sc-service = { version = "0.8.0-alpha.2", path = "../../client/service", default-features = false } -sc-network = { path = "../../client/network" , version = "0.8.0-alpha.2"} -sc-chain-spec = { path = "../../client/chain-spec" , version = "2.0.0-alpha.2"} +kvdb-web = "0.5" +sc-informant = { version = "0.8.0-alpha.5", path = "../../client/informant" } +sc-service = { version = "0.8.0-alpha.5", path = "../../client/service", default-features = false } +sc-network = { path = "../../client/network", version = "0.8.0-alpha.5"} +sc-chain-spec = { path = "../../client/chain-spec", version = "2.0.0-alpha.5"} # Imported just for the `no_cc` feature clear_on_drop = { version = "0.2.3", features = ["no_cc"] } @@ -31,3 +31,6 @@ rand6 = { package = "rand", version = "0.6", features = ["wasm-bindgen"] } rand = { version = "0.7", features = ["wasm-bindgen"] } futures-timer = { version = "3.0.1", features = ["wasm-bindgen"]} chrono = { version = "0.4", features = ["wasmbind"] } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index c9c159efe3a359effb1174ea2b4d0728239f525f..8bed06eee9621ead29f8200ae9d590ca0b423075 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -17,8 +17,10 @@ use futures01::sync::mpsc as mpsc01; use log::{debug, info}; use std::sync::Arc; +use sc_network::config::TransportConfig; use sc_service::{ - AbstractService, RpcSession, Roles, Configuration, config::{DatabaseConfig, KeystoreConfig}, + AbstractService, RpcSession, Role, Configuration, + config::{DatabaseConfig, KeystoreConfig, NetworkConfiguration}, GenericChainSpec, RuntimeGenesis }; use wasm_bindgen::prelude::*; @@ -38,34 +40,62 @@ pub async fn browser_configuration(chain_spec: GenericChainSpec) -> Result> where G: RuntimeGenesis + 'static, - E: Extension + 'static, + E: Extension + 'static + Send, { let name = chain_spec.name().to_string(); let transport = ExtTransport::new(ffi::websocket_transport()); - let mut config = Configuration::default(); - config.network.boot_nodes = chain_spec.boot_nodes().to_vec(); - config.telemetry_endpoints = chain_spec.telemetry_endpoints().clone(); - config.chain_spec = Some(Box::new(chain_spec)); - config.network.transport = sc_network::config::TransportConfig::Normal { + let mut network = NetworkConfiguration::new( + format!("{} (Browser)", name), + "unknown", + Default::default(), + &std::env::current_dir().expect("current directory must exist"), + ); + network.boot_nodes = chain_spec.boot_nodes().to_vec(); + network.transport = TransportConfig::Normal { wasm_external_transport: Some(transport.clone()), allow_private_ipv4: true, enable_mdns: false, use_yamux_flow_control: true, }; - config.task_executor = Some(Arc::new(move |fut| { - wasm_bindgen_futures::spawn_local(fut) - })); - config.telemetry_external_transport = Some(transport); - config.roles = Roles::LIGHT; - config.name = format!("{} (Browser)", name); - config.database = Some({ - 
info!("Opening Indexed DB database '{}'...", name); - let db = kvdb_web::Database::open(name, 10) - .await?; - DatabaseConfig::Custom(Arc::new(db)) - }); - config.keystore = KeystoreConfig::InMemory; + + let config = Configuration { + network, + telemetry_endpoints: chain_spec.telemetry_endpoints().clone(), + chain_spec: Box::new(chain_spec), + task_executor: Arc::new(move |fut| wasm_bindgen_futures::spawn_local(fut)), + telemetry_external_transport: Some(transport), + role: Role::Light, + database: { + info!("Opening Indexed DB database '{}'...", name); + let db = kvdb_web::Database::open(name, 10).await?; + + DatabaseConfig::Custom(Arc::new(db)) + }, + keystore: KeystoreConfig::InMemory, + default_heap_pages: Default::default(), + dev_key_seed: Default::default(), + disable_grandpa: Default::default(), + execution_strategies: Default::default(), + force_authoring: Default::default(), + impl_name: "parity-substrate", + impl_version: "0.0.0", + offchain_worker: Default::default(), + prometheus_config: Default::default(), + pruning: Default::default(), + rpc_cors: Default::default(), + rpc_http: Default::default(), + rpc_ws: Default::default(), + rpc_ws_max_connections: Default::default(), + state_cache_child_ratio: Default::default(), + state_cache_size: Default::default(), + tracing_receiver: Default::default(), + tracing_targets: Default::default(), + transaction_pool: Default::default(), + wasm_method: Default::default(), + max_runtime_instances: 8, + announce_block: true, + }; Ok(config) } diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index 5903d107fbbe138b123e4c27aafe3b7dfecbbadb..637d50d19dde2c36265d920a5b3b545697549adc 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-build-script-utils" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,3 +9,7 @@ repository = "https://github.com/paritytech/substrate/" description = "Crate with utility functions for `build.rs` scripts." [dependencies] +platforms = "0.2.1" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/utils/build-script-utils/src/git.rs b/utils/build-script-utils/src/git.rs new file mode 100644 index 0000000000000000000000000000000000000000..10f5446cb44f709e9208b89105d8246bf14ea23b --- /dev/null +++ b/utils/build-script-utils/src/git.rs @@ -0,0 +1,124 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::{env, fs, fs::File, io, io::Read, path::PathBuf}; + +/// Make sure the calling `build.rs` script is rerun when `.git/HEAD` or the ref of `.git/HEAD` +/// changed. +/// +/// The file is searched from the `CARGO_MANIFEST_DIR` upwards. If the file can not be found, +/// a warning is generated. 
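The helper declared just below relies entirely on Cargo's build-script protocol. As a reminder of the directives involved, here is a minimal stand-alone `build.rs`; the path and environment variable name are placeholders, not what the real helpers in this diff emit.

```rust
// build.rs -- minimal illustration of the Cargo build-script directives used
// by the helpers in this crate (`rerun-if-changed`, `warning`, `rustc-env`).

use std::env;

fn main() {
    // Re-run this script whenever the named file changes.
    println!("cargo:rerun-if-changed=.git/HEAD");

    // Surface a warning in the build output (used when `.git/HEAD` cannot be found).
    println!("cargo:warning=this is only an example build script");

    // Export an environment variable readable at compile time via
    // `env!("EXAMPLE_BUILD_INFO")` from the crate being built.
    let manifest_dir = env::var("CARGO_MANIFEST_DIR").unwrap_or_default();
    println!("cargo:rustc-env=EXAMPLE_BUILD_INFO=built from {}", manifest_dir);
}
```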
+pub fn rerun_if_git_head_changed() { + let mut manifest_dir = PathBuf::from( + env::var("CARGO_MANIFEST_DIR").expect("`CARGO_MANIFEST_DIR` is always set by cargo."), + ); + let manifest_dir_copy = manifest_dir.clone(); + + while manifest_dir.parent().is_some() { + match get_git_paths(&manifest_dir) { + Err(err) => { + eprintln!("cargo:warning=Unable to read the Git repository: {}", err); + + return; + } + Ok(None) => {} + Ok(Some(paths)) => { + for p in paths { + println!("cargo:rerun-if-changed={}", p.display()); + } + + return; + } + } + + manifest_dir.pop(); + } + + println!( + "cargo:warning=Could not find `.git/HEAD` searching from `{}` upwards!", + manifest_dir_copy.display(), + ); +} + +// Code taken from https://github.com/rustyhorde/vergen/blob/8d522db8c8e16e26c0fc9ea8e6b0247cbf5cca84/src/output/envvar.rs +fn get_git_paths(path: &PathBuf) -> Result>, io::Error> { + let git_dir_or_file = path.join(".git"); + + if let Ok(metadata) = fs::metadata(&git_dir_or_file) { + if metadata.is_dir() { + // Echo the HEAD path + let git_head_path = git_dir_or_file.join("HEAD"); + + // Determine where HEAD points and echo that path also. + let mut f = File::open(&git_head_path)?; + let mut git_head_contents = String::new(); + let _ = f.read_to_string(&mut git_head_contents)?; + let ref_vec: Vec<&str> = git_head_contents.split(": ").collect(); + + if ref_vec.len() == 2 { + let current_head_file = ref_vec[1]; + let git_refs_path = git_dir_or_file.join(current_head_file); + + Ok(Some(vec![git_head_path, git_refs_path])) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "You are most likely in a detached HEAD state", + )) + } + } else if metadata.is_file() { + // We are in a worktree, so find out where the actual worktrees//HEAD file is. + let mut git_file = File::open(&git_dir_or_file)?; + let mut git_contents = String::new(); + let _ = git_file.read_to_string(&mut git_contents)?; + let dir_vec: Vec<&str> = git_contents.split(": ").collect(); + let git_path = dir_vec[1].trim(); + + // Echo the HEAD psth + let git_head_path = PathBuf::from(git_path).join("HEAD"); + + // Find out what the full path to the .git dir is. + let mut actual_git_dir = PathBuf::from(git_path); + actual_git_dir.pop(); + actual_git_dir.pop(); + + // Determine where HEAD points and echo that path also. + let mut f = File::open(&git_head_path)?; + let mut git_head_contents = String::new(); + let _ = f.read_to_string(&mut git_head_contents)?; + let ref_vec: Vec<&str> = git_head_contents.split(": ").collect(); + + if ref_vec.len() == 2 { + let current_head_file = ref_vec[1]; + let git_refs_path = actual_git_dir.join(current_head_file); + + Ok(Some(vec![git_head_path, git_refs_path])) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "You are most likely in a detached HEAD state", + )) + } + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "Invalid .git format (Not a directory or a file)", + )) + } + } else { + Ok(None) + } +} diff --git a/utils/build-script-utils/src/lib.rs b/utils/build-script-utils/src/lib.rs index 1b915bdcafacb1e5ecc0b81c60d062d0537ed496..57a1e7c5cdb3e8cae406b4e6d49b1dbc8cc27fb6 100644 --- a/utils/build-script-utils/src/lib.rs +++ b/utils/build-script-utils/src/lib.rs @@ -16,29 +16,8 @@ //! Crate with utility functions for `build.rs` scripts. -use std::{env, path::PathBuf}; +mod version; +mod git; -/// Make sure the calling `build.rs` script is rerun when `.git/HEAD` changed. -/// -/// The file is searched from the `CARGO_MANIFEST_DIR` upwards. 
If the file can not be found, -/// a warning is generated. -pub fn rerun_if_git_head_changed() { - let mut manifest_dir = PathBuf::from( - env::var("CARGO_MANIFEST_DIR").expect("`CARGO_MANIFEST_DIR` is always set by cargo.") - ); - let manifest_dir_copy = manifest_dir.clone(); - - while manifest_dir.parent().is_some() { - if manifest_dir.join(".git/HEAD").exists() { - println!("cargo:rerun-if-changed={}", manifest_dir.join(".git/HEAD").display()); - return - } - - manifest_dir.pop(); - } - - println!( - "cargo:warning=Could not find `.git/HEAD` searching from `{}` upwards!", - manifest_dir_copy.display(), - ); -} +pub use git::*; +pub use version::*; diff --git a/utils/build-script-utils/src/version.rs b/utils/build-script-utils/src/version.rs new file mode 100644 index 0000000000000000000000000000000000000000..01a97c6f383dde71f383cb0a9d5e617a380d0ac7 --- /dev/null +++ b/utils/build-script-utils/src/version.rs @@ -0,0 +1,66 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use platforms::*; +use std::{borrow::Cow, process::Command}; + +/// Generate the `cargo:` key output +pub fn generate_cargo_keys() { + let output = Command::new("git") + .args(&["rev-parse", "--short", "HEAD"]) + .output(); + + let commit = match output { + Ok(o) if o.status.success() => { + let sha = String::from_utf8_lossy(&o.stdout).trim().to_owned(); + Cow::from(sha) + } + Ok(o) => { + println!("cargo:warning=Git command failed with status: {}", o.status); + Cow::from("unknown-commit") + }, + Err(err) => { + println!("cargo:warning=Failed to execute git command: {}", err); + Cow::from("unknown-commit") + }, + }; + + println!("cargo:rustc-env=SUBSTRATE_CLI_IMPL_VERSION={}", get_version(&commit)) +} + +fn get_platform() -> String { + let env_dash = if TARGET_ENV.is_some() { "-" } else { "" }; + + format!( + "{}-{}{}{}", + TARGET_ARCH.as_str(), + TARGET_OS.as_str(), + env_dash, + TARGET_ENV.map(|x| x.as_str()).unwrap_or(""), + ) +} + +fn get_version(impl_commit: &str) -> String { + let commit_dash = if impl_commit.is_empty() { "" } else { "-" }; + + format!( + "{}{}{}-{}", + std::env::var("CARGO_PKG_VERSION").unwrap_or_default(), + commit_dash, + impl_commit, + get_platform(), + ) +} diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index 8ade20cd7b7e29c44900be94ae8fe06bb0f8cf49..e46618feb8e5a9d8dc4aed8f100c5f6bfcea1323 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "fork-tree" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -10,4 +10,7 @@ description = "Utility library for managing tree-like ordered data with logic fo documentation = "https://docs.rs/fork-tree" [dependencies] -codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = 
"1.3.0", features = ["derive"] } + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index d0cf9c9f37ecef0cac625d5df83702eafd815e38..1e7b48fed07076a29a184579dc79f622c2e028ac 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -93,41 +93,77 @@ impl ForkTree where /// node. Otherwise the tree remains unchanged. The given function /// `is_descendent_of` should return `true` if the second hash (target) is a /// descendent of the first hash (base). + /// + /// Returns all pruned node data. pub fn prune( &mut self, hash: &H, number: &N, is_descendent_of: &F, predicate: &P, - ) -> Result<(), Error> + ) -> Result, Error> where E: std::error::Error, F: Fn(&H, &H) -> Result, P: Fn(&V) -> bool, { - let new_root = self.find_node_where( + let new_root_index = self.find_node_index_where( hash, number, is_descendent_of, predicate, )?; - if let Some(root) = new_root { - let mut root = root.clone(); + let removed = if let Some(mut root_index) = new_root_index { + let mut old_roots = std::mem::replace(&mut self.roots, Vec::new()); + + let mut root = None; + let mut cur_children = Some(&mut old_roots); + + while let Some(cur_index) = root_index.pop() { + if let Some(children) = cur_children.take() { + if root_index.is_empty() { + root = Some(children.remove(cur_index)); + } else { + cur_children = Some(&mut children[cur_index].children); + } + } + } + + let mut root = root + .expect("find_node_index_where will return array with at least one index; \ + this results in at least one item in removed; qed"); + + let mut removed = old_roots; // we found the deepest ancestor of the finalized block, so we prune // out any children that don't include the finalized block. - let children = std::mem::replace(&mut root.children, Vec::new()); - root.children = children.into_iter().filter(|node| { - node.number == *number && node.hash == *hash || - node.number < *number && is_descendent_of(&node.hash, hash).unwrap_or(false) - }).take(1).collect(); + let root_children = std::mem::replace(&mut root.children, Vec::new()); + let mut is_first = true; + + for child in root_children { + if is_first && + (child.number == *number && child.hash == *hash || + child.number < *number && is_descendent_of(&child.hash, hash).unwrap_or(false)) + { + root.children.push(child); + // assuming that the tree is well formed only one child should pass this requirement + // due to ancestry restrictions (i.e. they must be different forks). + is_first = false; + } else { + removed.push(child); + } + } self.roots = vec![root]; - } + + removed + } else { + Vec::new() + }; self.rebalance(); - Ok(()) + Ok(RemovedIterator { stack: removed }) } } @@ -250,6 +286,26 @@ impl ForkTree where Ok(None) } + /// Map fork tree into values of new types. + pub fn map( + self, + f: &mut F, + ) -> ForkTree where + F: FnMut(&H, &N, V) -> VT, + { + let roots = self.roots + .into_iter() + .map(|root| { + root.map(f) + }) + .collect(); + + ForkTree { + roots, + best_finalized_number: self.best_finalized_number, + } + } + /// Same as [`find_node_where`](Self::find_node_where), but returns mutable reference. pub fn find_node_where_mut( &mut self, @@ -275,6 +331,32 @@ impl ForkTree where Ok(None) } + /// Same as [`find_node_where`](Self::find_node_where), but returns indexes. 
+ pub fn find_node_index_where( + &self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + // search for node starting from all roots + for (index, root) in self.roots.iter().enumerate() { + let node = root.find_node_index_where(hash, number, is_descendent_of, predicate)?; + + // found the node, early exit + if let FindOutcome::Found(mut node) = node { + node.push(index); + return Ok(Some(node)); + } + } + + Ok(None) + } + /// Finalize a root in the tree and return it, return `None` in case no root /// with the given hash exists. All other roots are pruned, and the children /// of the finalized node become the new roots. @@ -588,6 +670,29 @@ mod node_implementation { max + 1 } + /// Map node data into values of new types. + pub fn map( + self, + f: &mut F, + ) -> Node where + F: FnMut(&H, &N, V) -> VT, + { + let children = self.children + .into_iter() + .map(|node| { + node.map(f) + }) + .collect(); + + let vt = f(&self.hash, &self.number, self.data); + Node { + hash: self.hash, + number: self.number, + data: vt, + children, + } + } + pub fn import( &mut self, mut hash: H, @@ -780,6 +885,27 @@ impl<'a, H, N, V> Iterator for ForkTreeIterator<'a, H, N, V> { } } +struct RemovedIterator { + stack: Vec>, +} + +impl Iterator for RemovedIterator { + type Item = (H, N, V); + + fn next(&mut self) -> Option { + self.stack.pop().map(|mut node| { + // child nodes are stored ordered by max branch height (decreasing), + // we want to keep this ordering while iterating but since we're + // using a stack for iterator state we need to reverse it. + let mut children = Vec::new(); + std::mem::swap(&mut children, &mut node.children); + + self.stack.extend(children.into_iter().rev()); + (node.hash, node.number, node.data) + }) + } +} + #[cfg(test)] mod test { use super::{FinalizationResult, ForkTree, Error}; @@ -805,7 +931,7 @@ mod test { // / / // A - F - H - I // \ - // - L - M - N + // - L - M // \ // - O // \ @@ -813,22 +939,21 @@ mod test { // // (where N is not a part of fork tree) let is_descendent_of = |base: &&str, block: &&str| -> Result { - let letters = vec!["B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L"]; + let letters = vec!["B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "O"]; match (*base, *block) { ("A", b) => Ok(letters.into_iter().any(|n| n == b)), ("B", b) => Ok(b == "C" || b == "D" || b == "E"), ("C", b) => Ok(b == "D" || b == "E"), ("D", b) => Ok(b == "E"), ("E", _) => Ok(false), - ("F", b) => Ok(b == "G" || b == "H" || b == "I" || b == "L" || b == "M" || b == "N" || b == "O"), + ("F", b) => Ok(b == "G" || b == "H" || b == "I" || b == "L" || b == "M" || b == "O"), ("G", _) => Ok(false), ("H", b) => Ok(b == "I" || b == "L" || b == "M" || b == "O"), ("I", _) => Ok(false), ("J", b) => Ok(b == "K"), ("K", _) => Ok(false), - ("L", b) => Ok(b == "M" || b == "O" || b == "N"), - ("M", b) => Ok(b == "N"), - ("N", _) => Ok(false), + ("L", b) => Ok(b == "M" || b == "O"), + ("M", _) => Ok(false), ("O", _) => Ok(false), ("0", _) => Ok(true), _ => Ok(false), @@ -1324,11 +1449,18 @@ mod test { assert_eq!(node.number, 3); } + #[test] + fn map_works() { + let (tree, _is_descendent_of) = test_fork_tree(); + + let _tree = tree.map(&mut |_, _, _| ()); + } + #[test] fn prune_works() { let (mut tree, is_descendent_of) = test_fork_tree(); - tree.prune( + let removed = tree.prune( &"C", &3, &is_descendent_of, @@ -1345,7 +1477,12 @@ mod test { vec!["B", "C", "D", "E"], ); 
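`RemovedIterator`, added further up in this file, drives a depth-first, pre-order walk over the pruned subtrees with an explicit stack, pushing each child list in reverse so that popping preserves the stored order. The same idea in isolation, with a stand-in node type rather than the fork-tree one:

```rust
// Stand-in types for illustration; not the fork-tree `Node` / `RemovedIterator`.

struct Node<V> {
    value: V,
    children: Vec<Node<V>>,
}

struct DrainDepthFirst<V> {
    stack: Vec<Node<V>>,
}

impl<V> Iterator for DrainDepthFirst<V> {
    type Item = V;

    fn next(&mut self) -> Option<V> {
        self.stack.pop().map(|node| {
            // Reversing keeps the children in their stored order once they
            // are popped off the stack again.
            self.stack.extend(node.children.into_iter().rev());
            node.value
        })
    }
}

fn main() {
    let tree = Node {
        value: "A",
        children: vec![
            Node { value: "B", children: vec![Node { value: "C", children: vec![] }] },
            Node { value: "D", children: vec![] },
        ],
    };

    let order: Vec<_> = DrainDepthFirst { stack: vec![tree] }.collect();
    assert_eq!(order, vec!["A", "B", "C", "D"]); // pre-order traversal
    println!("{:?}", order);
}
```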
- tree.prune( + assert_eq!( + removed.map(|(hash, _, _)| hash).collect::>(), + vec!["A", "F", "G", "H", "I", "L", "M", "O", "J", "K"] + ); + + let removed = tree.prune( &"E", &5, &is_descendent_of, @@ -1361,6 +1498,11 @@ mod test { tree.iter().map(|(hash, _, _)| *hash).collect::>(), vec!["D", "E"], ); + + assert_eq!( + removed.map(|(hash, _, _)| hash).collect::>(), + vec!["B", "C"] + ); } #[test] diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index a14b37dd46833590ca3f8923b79f9dec3f7290e8..f4b72187478d47c63575c01ad27d86500c160816 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "frame-benchmarking-cli" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" @@ -9,13 +9,22 @@ repository = "https://github.com/paritytech/substrate/" description = "CLI for benchmarking FRAME" [dependencies] -frame-benchmarking = { version = "2.0.0-alpha.2", path = "../../../frame/benchmarking" } -sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../../../client/service" } -sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } -sc-client = { version = "0.8.0-alpha.2", path = "../../../client" } -sc-client-db = { version = "0.8.0-alpha.2", path = "../../../client/db" } -sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor" } -sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } +frame-benchmarking = { version = "2.0.0-alpha.5", path = "../../../frame/benchmarking" } +sp-core = { version = "2.0.0-alpha.5", path = "../../../primitives/core" } +sc-service = { version = "0.8.0-alpha.5", default-features = false, path = "../../../client/service" } +sc-cli = { version = "0.8.0-alpha.5", path = "../../../client/cli" } +sc-client = { version = "0.8.0-alpha.5", path = "../../../client" } +sc-client-db = { version = "0.8.0-alpha.5", path = "../../../client/db" } +sc-executor = { version = "0.8.0-alpha.5", path = "../../../client/executor" } +sp-externalities = { version = "0.8.0-alpha.5", path = "../../../primitives/externalities" } +sp-runtime = { version = "2.0.0-alpha.5", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-alpha.5", path = "../../../primitives/state-machine" } structopt = "0.3.8" -codec = { version = "1.2.0", package = "parity-scale-codec" } +codec = { version = "1.3.0", package = "parity-scale-codec" } + +[features] +default = ["rocksdb"] +rocksdb = ["sc-client-db/kvdb-rocksdb"] + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs new file mode 100644 index 0000000000000000000000000000000000000000..5e35d57cdaee9eb6b7523f4a17b38d1aa70d7b5b --- /dev/null +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -0,0 +1,141 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::BenchmarkCmd; +use codec::{Decode, Encode}; +use frame_benchmarking::{Analysis, BenchmarkBatch}; +use sc_cli::{SharedParams, CliConfiguration, ExecutionStrategy, Result}; +use sc_client::StateMachine; +use sc_client_db::BenchmarkingState; +use sc_executor::NativeExecutor; +use sp_externalities::Extensions; +use sc_service::{Configuration, NativeExecutionDispatch}; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT, NumberFor}, +}; +use sp_core::{tasks, testing::KeyStore, traits::KeystoreExt}; +use std::fmt::Debug; + +impl BenchmarkCmd { + /// Runs the command and benchmarks the chain. + pub fn run(&self, config: Configuration) -> Result<()> + where + BB: BlockT + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + ExecDispatch: NativeExecutionDispatch + 'static, + { + let spec = config.chain_spec; + let wasm_method = self.wasm_method.into(); + let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); + + let genesis_storage = spec.build_storage()?; + let mut changes = Default::default(); + let cache_size = Some(self.database_cache_size as usize); + let state = BenchmarkingState::::new(genesis_storage, cache_size)?; + let executor = NativeExecutor::::new( + wasm_method, + None, // heap pages + 2, // The runtime instances cache size. + ); + + let mut extensions = Extensions::default(); + extensions.register(KeystoreExt(KeyStore::new())); + + let result = StateMachine::<_, _, NumberFor, _>::new( + &state, + None, + &mut changes, + &executor, + "Benchmark_dispatch_benchmark", + &( + &self.pallet, + &self.extrinsic, + self.lowest_range_values.clone(), + self.highest_range_values.clone(), + self.steps.clone(), + self.repeat, + ).encode(), + extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?, + tasks::executor(), + ) + .execute(strategy.into()) + .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; + + let results = , String> as Decode>::decode(&mut &result[..]) + .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?; + + match results { + Ok(batches) => for batch in batches.into_iter() { + // Print benchmark metadata + println!( + "Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}", + String::from_utf8(batch.pallet).expect("Encoded from String; qed"), + String::from_utf8(batch.benchmark).expect("Encoded from String; qed"), + self.lowest_range_values, + self.highest_range_values, + self.steps, + self.repeat, + ); + + if self.raw_data { + // Print the table header + batch.results[0].0.iter().for_each(|param| print!("{:?},", param.0)); + + print!("extrinsic_time,storage_root_time\n"); + // Print the values + batch.results.iter().for_each(|result| { + let parameters = &result.0; + parameters.iter().for_each(|param| print!("{:?},", param.1)); + // Print extrinsic time and storage root time + print!("{:?},{:?}\n", result.1, result.2); + }); + + println!(); + } + + // Conduct analysis. 
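The analysis step right below fits linear models over the recorded `(component value, execution time)` samples. Purely as an illustration of that kind of fit (not the actual `frame_benchmarking::Analysis` implementation), a plain ordinary-least-squares sketch:

```rust
/// Fit `y ~= intercept + slope * x` over `(x, y)` samples by ordinary least squares.
fn least_squares(samples: &[(f64, f64)]) -> Option<(f64, f64)> {
    if samples.is_empty() {
        return None;
    }
    let n = samples.len() as f64;
    let sum_x: f64 = samples.iter().map(|&(x, _)| x).sum();
    let sum_y: f64 = samples.iter().map(|&(_, y)| y).sum();
    let sum_xx: f64 = samples.iter().map(|&(x, _)| x * x).sum();
    let sum_xy: f64 = samples.iter().map(|&(x, y)| x * y).sum();

    let denom = n * sum_xx - sum_x * sum_x;
    if denom == 0.0 {
        // All component values identical: no slope can be estimated.
        return None;
    }
    let slope = (n * sum_xy - sum_x * sum_y) / denom;
    let intercept = (sum_y - slope * sum_x) / n;
    Some((intercept, slope))
}

fn main() {
    // (component value, measured extrinsic time) -- made-up numbers.
    let samples = [(1.0, 120.0), (10.0, 390.0), (20.0, 710.0), (40.0, 1_350.0)];
    let (intercept, slope) = least_squares(&samples).unwrap();
    println!("time ~= {:.1} + {:.1} * size", intercept, slope);
}
```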
+				if !self.no_median_slopes {
+					if let Some(analysis) = Analysis::median_slopes(&batch.results) {
+						println!("Median Slopes Analysis\n========\n{}", analysis);
+					}
+				}
+				if !self.no_min_squares {
+					if let Some(analysis) = Analysis::min_squares_iqr(&batch.results) {
+						println!("Min Squares Analysis\n========\n{}", analysis);
+					}
+				}
+			},
+			Err(error) => eprintln!("Error: {:?}", error),
+		}
+
+		Ok(())
+	}
+}
+
+impl CliConfiguration for BenchmarkCmd {
+	fn shared_params(&self) -> &SharedParams {
+		&self.shared_params
+	}
+
+	fn chain_id(&self, _is_dev: bool) -> Result<String> {
+		Ok(match self.shared_params.chain {
+			Some(ref chain) => chain.clone(),
+			None => "dev".into(),
+		})
+	}
+}
diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs
index 4467753c357d52394992b8b78343f5af3f62812a..96204d1ae576382feb3ac68ab9b5c26ed5a90d61 100644
--- a/utils/frame/benchmarking-cli/src/lib.rs
+++ b/utils/frame/benchmarking-cli/src/lib.rs
@@ -14,24 +14,19 @@
 // You should have received a copy of the GNU General Public License
 // along with Substrate. If not, see <http://www.gnu.org/licenses/>.
 
+mod command;
+
+use sc_cli::{ExecutionStrategy, WasmExecutionMethod};
 use std::fmt::Debug;
-use sp_runtime::{traits::{Block as BlockT, Header as HeaderT, NumberFor}};
-use sc_client::StateMachine;
-use sc_cli::{ExecutionStrategy, WasmExecutionMethod, VersionInfo};
-use sc_client_db::BenchmarkingState;
-use sc_service::{Configuration, ChainSpec};
-use sc_executor::{NativeExecutor, NativeExecutionDispatch};
-use codec::{Encode, Decode};
-use frame_benchmarking::BenchmarkResults;
 
 /// The `benchmark` command used to benchmark FRAME Pallets.
 #[derive(Debug, structopt::StructOpt, Clone)]
 pub struct BenchmarkCmd {
-	/// Select a FRAME Pallet to benchmark.
+	/// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`).
 	#[structopt(short, long)]
 	pub pallet: String,
 
-	/// Select an extrinsic to benchmark.
+	/// Select an extrinsic inside the pallet to benchmark, or `*` for all.
 	#[structopt(short, long)]
 	pub extrinsic: String,
 
@@ -40,17 +35,29 @@ pub struct BenchmarkCmd {
 	pub steps: Vec<u32>,
 
 	/// Indicates lowest values for each of the component ranges.
-	#[structopt(long, use_delimiter = true)]
+	#[structopt(long = "low", use_delimiter = true)]
 	pub lowest_range_values: Vec<u32>,
 
 	/// Indicates highest values for each of the component ranges.
-	#[structopt(long, use_delimiter = true)]
+	#[structopt(long = "high", use_delimiter = true)]
 	pub highest_range_values: Vec<u32>,
 
 	/// Select how many repetitions of this benchmark should run.
 	#[structopt(short, long, default_value = "1")]
 	pub repeat: u32,
 
+	/// Print the raw results.
+	#[structopt(long = "raw")]
+	pub raw_data: bool,
+
+	/// Don't print the median-slopes linear regression analysis.
+	#[structopt(long)]
+	pub no_median_slopes: bool,
+
+	/// Don't print the min-squares linear regression analysis.
+	#[structopt(long)]
+	pub no_min_squares: bool,
+
 	#[allow(missing_docs)]
 	#[structopt(flatten)]
 	pub shared_params: sc_cli::SharedParams,
 
@@ -73,110 +80,8 @@ pub struct BenchmarkCmd {
 		default_value = "Interpreted"
 	)]
 	pub wasm_method: WasmExecutionMethod,
-}
-impl BenchmarkCmd {
-	/// Initialize
-	pub fn init(&self, version: &sc_cli::VersionInfo) -> sc_cli::Result<()> {
-		self.shared_params.init(version)
-	}
-
-	/// Runs the command and benchmarks the chain.
-	pub fn run<BB, ExecDispatch>(
-		self,
-		config: Configuration,
-	) -> sc_cli::Result<()>
-	where
-		BB: BlockT + Debug,
-		<<<BB as BlockT>::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug,
-		<BB as BlockT>::Hash: std::str::FromStr,
-		ExecDispatch: NativeExecutionDispatch + 'static,
-	{
-		let spec = config.chain_spec.expect("chain_spec is always Some");
-		let wasm_method = self.wasm_method.into();
-		let strategy = self.execution.unwrap_or(ExecutionStrategy::Native);
-
-		let genesis_storage = spec.build_storage()?;
-		let mut changes = Default::default();
-		let state = BenchmarkingState::<BB>::new(genesis_storage)?;
-		let executor = NativeExecutor::<ExecDispatch>::new(
-			wasm_method,
-			None, // heap pages
-			2, // The runtime instances cache size.
-		);
-
-		let result = StateMachine::<_, _, NumberFor<BB>, _>::new(
-			&state,
-			None,
-			&mut changes,
-			&executor,
-			"Benchmark_dispatch_benchmark",
-			&(
-				&self.pallet,
-				&self.extrinsic,
-				self.lowest_range_values.clone(),
-				self.highest_range_values.clone(),
-				self.steps.clone(),
-				self.repeat,
-			).encode(),
-			Default::default(),
-			&sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?,
-		)
-		.execute(strategy.into())
-		.map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?;
-
-		let results = <std::result::Result<Vec<BenchmarkResults>, String> as Decode>::decode(&mut &result[..])
-			.map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?;
-
-		match results {
-			Ok(results) => {
-				// Print benchmark metadata
-				println!(
-					"Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}",
-					self.pallet,
-					self.extrinsic,
-					self.lowest_range_values,
-					self.highest_range_values,
-					self.steps,
-					self.repeat,
-				);
-
-				// Print the table header
-				results[0].0.iter().for_each(|param| print!("{:?},", param.0));
-
-				print!("extrinsic_time,storage_root_time\n");
-				// Print the values
-				results.iter().for_each(|result| {
-					let parameters = &result.0;
-					parameters.iter().for_each(|param| print!("{:?},", param.1));
-					// Print extrinsic time and storage root time
-					print!("{:?},{:?}\n", result.1, result.2);
-				});
-
-				eprintln!("Done.");
-			}
-			Err(error) => eprintln!("Error: {:?}", error),
-		}
-
-		Ok(())
-	}
-
-	/// Update and prepare a `Configuration` with command line parameters
-	pub fn update_config(
-		&self,
-		mut config: &mut Configuration,
-		spec_factory: impl FnOnce(&str) -> Result<Box<dyn ChainSpec>, String>,
-		_version: &VersionInfo,
-	) -> sc_cli::Result<()>
-	{
-		// Configure chain spec.
-		let chain_key = self.shared_params.chain.clone().unwrap_or("dev".into());
-		let spec = spec_factory(&chain_key)?;
-		config.chain_spec = Some(spec);
-
-		// Make sure to configure keystore.
-		config.use_in_memory_keystore()?;
-
-		Ok(())
-	}
+	/// Limit the memory the database cache can use.
+ #[structopt(long = "db-cache", value_name = "MiB", default_value = "128")] + pub database_cache_size: u32, } diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 162d25cf9bfcddebb1691d6ff54db1daac76a3fc..72884330d2e8224cc9959beb5c23bcd67166dea1 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "substrate-frame-rpc-support" -version = "2.0.0-alpha.3" +version = "2.0.0-alpha.5" authors = ["Parity Technologies ", "Andrew Dirksen "] edition = "2018" license = "GPL-3.0" @@ -10,14 +10,17 @@ description = "Substrate RPC for FRAME's support" [dependencies] futures = { version = "0.3.0", features = ["compat"] } -jsonrpc-client-transports = "14" +jsonrpc-client-transports = { version = "14.0.5", default-features = false, features = ["http"] } jsonrpc-core = "14" codec = { package = "parity-scale-codec", version = "1" } serde = "1" -frame-support = { version = "2.0.0-alpha.2", path = "../../../../frame/support" } -sp-storage = { version = "2.0.0-alpha.2", path = "../../../../primitives/storage" } -sc-rpc-api = { version = "0.8.0-alpha.2", path = "../../../../client/rpc-api" } +frame-support = { version = "2.0.0-alpha.5", path = "../../../../frame/support" } +sp-storage = { version = "2.0.0-alpha.5", path = "../../../../primitives/storage" } +sc-rpc-api = { version = "0.8.0-alpha.5", path = "../../../../client/rpc-api" } [dev-dependencies] -frame-system = { version = "2.0.0-alpha.2", path = "../../../../frame/system" } +frame-system = { version = "2.0.0-alpha.5", path = "../../../../frame/system" } tokio = "0.2" + +[package.metadata.docs.rs] +targets = ["x86_64-unknown-linux-gnu"] diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index 42c10fb2cc2e843afd544c5b50aea0aabb6aa367..118f5709a6b7044712002d851fc49dbbd2b5eb6a 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -25,7 +25,7 @@ use jsonrpc_client_transports::RpcError; use codec::{DecodeAll, FullCodec, FullEncode}; use serde::{de::DeserializeOwned, Serialize}; use frame_support::storage::generator::{ - StorageDoubleMap, StorageLinkedMap, StorageMap, StorageValue + StorageDoubleMap, StorageMap, StorageValue }; use sp_storage::{StorageData, StorageKey}; use sc_rpc_api::state::StateClient; @@ -63,9 +63,9 @@ use sc_rpc_api::state::StateClient; /// decl_storage! { /// trait Store for Module as TestRuntime { /// pub LastActionId: u64; -/// pub Voxels: map hasher(blake2_256) Loc => Block; -/// pub Actions: linked_map hasher(blake2_256) u64 => Loc; -/// pub Prefab: double_map hasher(blake2_256) u128, hasher(blake2_256) (i8, i8, i8) => Block; +/// pub Voxels: map hasher(blake2_128_concat) Loc => Block; +/// pub Actions: map hasher(blake2_128_concat) u64 => Loc; +/// pub Prefab: double_map hasher(blake2_128_concat) u128, hasher(blake2_128_concat) (i8, i8, i8) => Block; /// } /// } /// @@ -79,7 +79,7 @@ use sc_rpc_api::state::StateClient; /// let q = StorageQuery::map::((0, 0, 0)); /// let _: Option = q.get(&cl, None).await?; /// -/// let q = StorageQuery::linked_map::(12); +/// let q = StorageQuery::map::(12); /// let _: Option = q.get(&cl, None).await?; /// /// let q = StorageQuery::double_map::(3, (0, 0, 0)); @@ -111,14 +111,6 @@ impl StorageQuery { } } - /// Create a storage query for a value in a StorageLinkedMap. 
-	pub fn linked_map<St: StorageLinkedMap<K, V>, K: FullCodec>(key: K) -> Self {
-		Self {
-			key: StorageKey(St::storage_linked_map_final_key(key)),
-			_spook: PhantomData,
-		}
-	}
-
 	/// Create a storage query for a value in a StorageDoubleMap.
 	pub fn double_map<St: StorageDoubleMap<K1, K2, V>, K1: FullEncode, K2: FullEncode>(
 		key1: K1,
diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml
index 8b1c62ccd70e2c2290ec29a56156d3d61aac9474..a9e775393dfb17702933d17399a80824872ba5ee 100644
--- a/utils/frame/rpc/system/Cargo.toml
+++ b/utils/frame/rpc/system/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "substrate-frame-rpc-system"
-version = "2.0.0-alpha.3"
+version = "2.0.0-alpha.5"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
 license = "GPL-3.0"
@@ -9,22 +9,25 @@ repository = "https://github.com/paritytech/substrate/"
 description = "FRAME's system exposed over Substrate RPC"
 
 [dependencies]
-sc-client = { version = "0.8.0-alpha.2", path = "../../../../client/" }
-codec = { package = "parity-scale-codec", version = "1.2.0" }
-futures = "0.3.1"
+sc-client = { version = "0.8.0-alpha.5", path = "../../../../client/" }
+codec = { package = "parity-scale-codec", version = "1.3.0" }
+futures = "0.3.4"
 jsonrpc-core = "14.0.3"
-jsonrpc-core-client = "14.0.3"
+jsonrpc-core-client = "14.0.5"
 jsonrpc-derive = "14.0.3"
 log = "0.4.8"
 serde = { version = "1.0.101", features = ["derive"] }
-sp-runtime = { version = "2.0.0-alpha.2", path = "../../../../primitives/runtime" }
-sp-api = { version = "2.0.0-alpha.2", path = "../../../../primitives/api" }
-frame-system-rpc-runtime-api = { version = "2.0.0-alpha.2", path = "../../../../frame/system/rpc/runtime-api" }
-sp-core = { version = "2.0.0-alpha.2", path = "../../../../primitives/core" }
-sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../../primitives/blockchain" }
-sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../../primitives/transaction-pool" }
+sp-runtime = { version = "2.0.0-alpha.5", path = "../../../../primitives/runtime" }
+sp-api = { version = "2.0.0-alpha.5", path = "../../../../primitives/api" }
+frame-system-rpc-runtime-api = { version = "2.0.0-alpha.5", path = "../../../../frame/system/rpc/runtime-api" }
+sp-core = { version = "2.0.0-alpha.5", path = "../../../../primitives/core" }
+sp-blockchain = { version = "2.0.0-alpha.5", path = "../../../../primitives/blockchain" }
+sp-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../../primitives/transaction-pool" }
 
 [dev-dependencies]
 substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../../test-utils/runtime/client" }
 env_logger = "0.7.0"
-sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../../client/transaction-pool" }
+sc-transaction-pool = { version = "2.0.0-alpha.5", path = "../../../../client/transaction-pool" }
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs
index e6214c73d6ea1ab3f0a3b4ffc9cc3d05d4cdd660..c73ddfe93efa067d8188f9abb66c554c9865baad 100644
--- a/utils/frame/rpc/system/src/lib.rs
+++ b/utils/frame/rpc/system/src/lib.rs
@@ -239,6 +239,7 @@ mod tests {
 			BasicPool::new(Default::default(), Arc::new(FullChainApi::new(client.clone()))).0
 		);
 
+		let source = sp_runtime::transaction_validity::TransactionSource::External;
 		let new_transaction = |nonce: u64| {
 			let t = Transfer {
 				from: AccountKeyring::Alice.into(),
@@ -250,9 +251,9 @@ mod tests {
 		};
 		// Populate the pool
 		let ext0 = new_transaction(0);
-		block_on(pool.submit_one(&BlockId::number(0), ext0)).unwrap();
+		block_on(pool.submit_one(&BlockId::number(0), source, ext0)).unwrap();
 		let ext1 = new_transaction(1);
-		block_on(pool.submit_one(&BlockId::number(0), ext1)).unwrap();
+		block_on(pool.submit_one(&BlockId::number(0), source, ext1)).unwrap();
 
 		let accounts = FullSystem::new(client, pool);
diff --git a/utils/prometheus/Cargo.toml b/utils/prometheus/Cargo.toml
index bc7750a72083fa59249db011549c8d63b777db59..add4b0da5ff9565b76e0efb25c5c7ceb888e709a 100644
--- a/utils/prometheus/Cargo.toml
+++ b/utils/prometheus/Cargo.toml
@@ -1,7 +1,7 @@
 [package]
 description = "Endpoint to expose Prometheus metrics"
 name = "substrate-prometheus-endpoint"
-version = "0.8.0-alpha.3"
+version = "0.8.0-alpha.5"
 license = "GPL-3.0"
 authors = ["Parity Technologies <admin@parity.io>"]
 edition = "2018"
@@ -10,7 +10,7 @@ repository = "https://github.com/paritytech/substrate/"
 
 [dependencies]
 log = "0.4.8"
-prometheus = "0.7"
+prometheus = "0.8"
 futures-util = { version = "0.3.1", default-features = false, features = ["io"] }
 derive_more = "0.99"
 
@@ -18,3 +18,6 @@ derive_more = "0.99"
 async-std = { version = "1.0.1", features = ["unstable"] }
 hyper = { version = "0.13.1", default-features = false, features = ["stream"] }
 tokio = "0.2"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
diff --git a/utils/prometheus/src/lib.rs b/utils/prometheus/src/lib.rs
index 54b9183bc63929859631e5aba7a02e87ac026e8a..9030704cb746ffbb4fa9ebfe5f468751a67a28d2 100644
--- a/utils/prometheus/src/lib.rs
+++ b/utils/prometheus/src/lib.rs
@@ -16,7 +16,10 @@ use futures_util::{FutureExt, future::Future};
 pub use prometheus::{
+	self,
 	Registry, Error as PrometheusError, Opts,
+	Histogram, HistogramOpts, HistogramVec,
+	exponential_buckets,
 	core::{
 		GenericGauge as Gauge, GenericCounter as Counter,
 		GenericGaugeVec as GaugeVec, GenericCounterVec as CounterVec,
@@ -120,7 +123,7 @@ mod known_os {
 			.await
 			.map_err(|_| Error::PortInUse(prometheus_addr))?;
 
-		log::info!("Prometheus server started at {}", prometheus_addr);
+		log::info!("〽️ Prometheus server started at {}", prometheus_addr);
 
 		let service = make_service_fn(move |_| {
 			let registry = registry.clone();
diff --git a/utils/wasm-builder-runner/Cargo.toml b/utils/wasm-builder-runner/Cargo.toml
index 8a41fe98b2728efee0a0e71c946853f4adde7be2..77796ea8d9a8b15c21d7531fb772afc43c47ee6a 100644
--- a/utils/wasm-builder-runner/Cargo.toml
+++ b/utils/wasm-builder-runner/Cargo.toml
@@ -10,3 +10,6 @@ license = "GPL-3.0"
 homepage = "https://substrate.dev"
 
 [dependencies]
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]
diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml
index 1aac8913939b1253e66ad50e53dd18d3d1177cd8..ed953cca577ddf1deaec0f8600b7d473293ab24c 100644
--- a/utils/wasm-builder/Cargo.toml
+++ b/utils/wasm-builder/Cargo.toml
@@ -19,3 +19,6 @@ fs2 = "0.4.3"
 wasm-gc-api = "0.1.11"
 atty = "0.2.13"
 itertools = "0.8.2"
+
+[package.metadata.docs.rs]
+targets = ["x86_64-unknown-linux-gnu"]