diff --git a/.github/workflows/release-bot.yml b/.github/workflows/release-bot.yml
new file mode 100644
index 0000000000000000000000000000000000000000..08aa94417c047d4e93645e4b6d0949ee2df29777
--- /dev/null
+++ b/.github/workflows/release-bot.yml
@@ -0,0 +1,18 @@
+name: Pushes release updates to a pre-defined Matrix room
+on:
+  release:
+    types:
+      - edited
+      - prereleased
+      - published
+jobs:
+  ping_matrix:
+    runs-on: ubuntu-latest
+    steps:
+    - name: send message
+      uses: s3krit/matrix-message-action@v0.0.2
+      with:
+        room_id: ${{ secrets.MATRIX_ROOM_ID }}
+        access_token: ${{ secrets.MATRIX_ACCESS_TOKEN }}
+        message: "**${{github.event.repository.full_name}}:** A release has been ${{github.event.action}}
+Release version [${{github.event.release.tag_name}}](${{github.event.release.html_url}})
+
+***Description:***
+${{github.event.release.body}}
+"
+        server: "matrix.parity.io"
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 62a6c2de32ddfa86fdbe6736737f7f0f3dc1768d..19f7298ab68eac2ed11646f23aff2e296d9d37bc 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -5,6 +5,21 @@
 # pipelines can be triggered manually in the web
 # setting DEPLOY_TAG will only deploy the tagged image
 
+# SAMPLE JOB TEMPLATE - This is not a complete example but is enough to build a
+# simple CI job. For full documentation, visit https://docs.gitlab.com/ee/ci/yaml/
+#
+# my-example-job:
+#   stage: test # One of the stages listed below this job (required)
+#   image: parity/tools:latest # Any docker image (required)
+#   allow_failure: true # Allow the pipeline to continue if this job fails (default: false)
+#   dependencies:
+#     - build-rust-doc-release # Any jobs that are required to run before this job (optional)
+#   variables:
+#     MY_ENVIRONMENT_VARIABLE: "some useful value" # Environment variables passed to the job (optional)
+#   script:
+#     - echo "List of shell commands to run in your job"
+#     - echo "You can also just specify a script here, like so:"
+#     - ./.maintain/gitlab/my_amazing_script.sh
 
 stages:
   - test
@@ -22,6 +37,9 @@ variables:
   CI_SERVER_NAME: "GitLab CI"
   DOCKER_OS: "debian:stretch"
   ARCH: "x86_64"
+  # FIXME set to release
+  CARGO_UNLEASH_INSTALL_PARAMS: "--version 1.0.0-alpha.8"
+  CARGO_UNLEASH_PKG_DEF: "--skip node node-* pallet-template pallet-example pallet-example-* subkey chain-spec-builder"
 
 .collect-artifacts: &collect-artifacts
@@ -86,6 +104,16 @@ check-runtime:
   interruptible: true
   allow_failure: true
 
+check-signed-tag:
+  stage: test
+  image: parity/tools:latest
+  <<: *kubernetes-build
+  only:
+    - tags
+    - /^v[0-9]+\.[0-9]+\.[0-9]+.*$/
+  script:
+    - ./.maintain/gitlab/check_signed.sh
+  allow_failure: false
 
 check-line-width:
   stage: test
@@ -160,6 +188,16 @@ test-dependency-rules:
   script:
     - .maintain/ensure-deps.sh
 
+unleash-check:
+  stage: test
+  <<: *docker-env
+  only:
+    - master
+    - tags
+  script:
+    - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS}
+    - cargo unleash check ${CARGO_UNLEASH_PKG_DEF}
+
 test-frame-staking:
   stage: test
   <<: *docker-env
@@ -524,7 +562,28 @@ publish-gh-doc:
   after_script:
     - rm -vrf ${HOME}/.gitconfig
 
+publish-draft-release:
+  stage: publish
+  image: parity/tools:latest
+  only:
+    - tags
+    - /^v[0-9]+\.[0-9]+\.[0-9]+.*$/
+  script:
+    - ./.maintain/gitlab/publish_draft_release.sh
+  interruptible: true
+  allow_failure: true
+publish-to-crates-io:
+  stage: publish
+  <<: *docker-env
+  only:
+    - tags
+    - /^v[0-9]+\.[0-9]+\.[0-9]+.*$/
+  script:
+    - cargo install cargo-unleash ${CARGO_UNLEASH_INSTALL_PARAMS}
+    - cargo unleash em-dragons --no-check ${CARGO_UNLEASH_PKG_DEF}
+  interruptible: true
+  allow_failure: true
 
 .deploy-template: &deploy
   stage: kubernetes
diff --git a/.maintain/Dockerfile b/.maintain/Dockerfile
index 7cba85c544afc2c8cc1ff56401b2172a01d30364..2fc1532aa2837b3314eaf17a907ca765d33c85b5 100644
--- a/.maintain/Dockerfile
+++ b/.maintain/Dockerfile
@@ -1,7 +1,7 @@
 # Note: We don't use Alpine and its packaged Rust/Cargo because they're too often out of date,
 # preventing them from being used to build Substrate/Polkadot.
 
-FROM phusion/baseimage:0.10.2 as builder
+FROM phusion/baseimage:0.11 as builder
 LABEL maintainer="chevdor@gmail.com"
 LABEL description="This is the build stage for Substrate. Here we create the binary."
@@ -20,13 +20,12 @@ RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && \
     export PATH="$PATH:$HOME/.cargo/bin" && \
     rustup toolchain install nightly && \
     rustup target add wasm32-unknown-unknown --toolchain nightly && \
-    rustup default nightly && \
     rustup default stable && \
     cargo build "--$PROFILE"
 
 # ===== SECOND STAGE ======
 
-FROM phusion/baseimage:0.10.2
+FROM phusion/baseimage:0.11
 LABEL maintainer="chevdor@gmail.com"
 LABEL description="This is the 2nd stage: a very small image where we copy the Substrate binary."
 ARG PROFILE=release
@@ -34,9 +33,10 @@ ARG PROFILE=release
 RUN mv /usr/share/ca* /tmp && \
     rm -rf /usr/share/* && \
     mv /tmp/ca-certificates /usr/share/ && \
-    mkdir -p /root/.local/share/Polkadot && \
-    ln -s /root/.local/share/Polkadot /data && \
-    useradd -m -u 1000 -U -s /bin/sh -d /substrate substrate
+    useradd -m -u 1000 -U -s /bin/sh -d /substrate substrate && \
+    mkdir -p /substrate/.local/share/substrate && \
+    chown -R substrate:substrate /substrate/.local && \
+    ln -s /substrate/.local/share/substrate /data
 
 COPY --from=builder /substrate/target/$PROFILE/substrate /usr/local/bin
 
@@ -49,7 +49,7 @@ RUN rm -rf /usr/lib/python* && \
     rm -rf /usr/bin /usr/sbin /usr/share/man
 
 USER substrate
-EXPOSE 30333 9933 9944
+EXPOSE 30333 9933 9944 9615
 VOLUME ["/data"]
 
 CMD ["/usr/local/bin/substrate"]
diff --git a/.maintain/gitlab/check_signed.sh b/.maintain/gitlab/check_signed.sh
new file mode 100755
index 0000000000000000000000000000000000000000..7c4cc47baba38fa41214ed0fefc7e09b75a69e7d
--- /dev/null
+++ b/.maintain/gitlab/check_signed.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+
+# shellcheck source=lib.sh
+source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh"
+
+version="$CI_COMMIT_TAG"
+
+echo '[+] Checking tag has been signed'
+check_tag "paritytech/substrate" "$version"
+case $? in
+  0) echo '[+] Tag found and has been signed'; exit 0
+    ;;
+  1) echo '[!] Tag found but has not been signed. Aborting release.'; exit 1
+    ;;
+  2) echo '[!] Tag not found. Aborting release.'; exit 1
+esac
diff --git a/.maintain/gitlab/lib.sh b/.maintain/gitlab/lib.sh
new file mode 100755
index 0000000000000000000000000000000000000000..c8b2d73e6097f42fd6590b5e3d78c537dc620028
--- /dev/null
+++ b/.maintain/gitlab/lib.sh
@@ -0,0 +1,81 @@
+#!/bin/sh
+
+api_base="https://api.github.com/repos"
+
+# Function to take 2 git tags/commits and get any lines from commit messages
+# that contain something that looks like a PR reference: e.g., (#1234)
+sanitised_git_logs(){
+  git --no-pager log --pretty=format:"%s" "$1..$2" |
+  # Only find messages referencing a PR
+  grep -E '\(#[0-9]+\)' |
+  # Strip any asterisks
+  sed 's/^* //g' |
+  # And add them all back
+  sed 's/^/* /g'
+}
+
+# Returns the last published release on github
+# repo: 'organization/repo'
+# Usage: last_github_release "$repo"
+last_github_release(){
+  curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" \
+    -s "$api_base/$1/releases/latest" | jq '.tag_name'
+}
+
+# Checks whether a tag on github has been verified
+# repo: 'organization/repo'
+# tagver: 'v1.2.3'
+# Usage: check_tag $repo $tagver
+check_tag () {
+  repo=$1
+  tagver=$2
+  tag_out=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$api_base/$repo/git/refs/tags/$tagver")
+  tag_sha=$(echo "$tag_out" | jq -r .object.sha)
+  object_url=$(echo "$tag_out" | jq -r .object.url)
+  if [ "$tag_sha" = "null" ]; then
+    return 2
+  fi
+  verified_str=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$object_url" | jq -r .verification.verified)
+  if [ "$verified_str" = "true" ]; then
+    # Verified, everything is good
+    return 0
+  else
+    # Not verified. Bad juju.
+    return 1
+  fi
+}
+
+# Checks whether a given PR has a given label.
+# repo: 'organization/repo'
+# pr_id: 12345
+# label: B1-silent
+# Usage: has_label $repo $pr_id $label
+has_label(){
+  repo="$1"
+  pr_id="$2"
+  label="$3"
+  out=$(curl -H "Authorization: token $GITHUB_RELEASE_TOKEN" -s "$api_base/$repo/pulls/$pr_id")
+  [ -n "$(echo "$out" | jq ".labels | .[] | select(.name==\"$label\")")" ]
+}
+
+# Formats a message into a JSON string for posting to Matrix
+# message: 'any plaintext message'
+# formatted_message: 'optional message formatted in html'
+# Usage: structure_message $content $formatted_content (optional)
+structure_message() {
+  if [ -z "$2" ]; then
+    body=$(jq -Rs --arg body "$1" '{"msgtype": "m.text", $body}' < /dev/null)
+  else
+    body=$(jq -Rs --arg body "$1" --arg formatted_body "$2" '{"msgtype": "m.text", $body, "format": "org.matrix.custom.html", $formatted_body}' < /dev/null)
+  fi
+  echo "$body"
+}
+
+# Post a message to a matrix room
+# body: '{body: "JSON string produced by structure_message"}'
+# room_id: !fsfSRjgjBWEWffws:matrix.parity.io
+# access_token: see https://matrix.org/docs/guides/client-server-api/
+# Usage: send_message $body (json formatted) $room_id $access_token
+send_message() {
+curl -XPOST -d "$1" "https://matrix.parity.io/_matrix/client/r0/rooms/$2/send/m.room.message?access_token=$3"
+}
diff --git a/.maintain/gitlab/publish_draft_release.sh b/.maintain/gitlab/publish_draft_release.sh
new file mode 100755
index 0000000000000000000000000000000000000000..4f73575f5bbaf836d7e07cb43c7fd1c1173858da
--- /dev/null
+++ b/.maintain/gitlab/publish_draft_release.sh
@@ -0,0 +1,86 @@
+#!/usr/bin/env bash
+
+# shellcheck source=lib.sh
+source "$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )/lib.sh"
+
+# Substrate labels for PRs we want to include in the release notes
+labels=(
+  'B1-runtimenoteworthy'
+  'B1-clientnoteworthy'
+  'B1-apinoteworthy'
+)
+
+version="$CI_COMMIT_TAG"
+
+# Note that this is not the last *tagged* version, but the last *published* version
+last_version=$(last_github_release 'paritytech/substrate')
+echo "[+] Version: $version; Previous version: $last_version"
+
+all_changes="$(sanitised_git_logs "$last_version" "$version")"
+labelled_changes=""
+echo "[+] Iterating through $(wc -l <<< "$all_changes") changes to find labelled PRs"
+while IFS= read -r line; do
+  pr_id=$(echo "$line" | sed -E 's/.*#([0-9]+)\)$/\1/')
+
+  # Skip if the PR has the silent label - this allows us to skip a few requests
+  if has_label 'paritytech/substrate' "$pr_id" 'B0-silent'; then
+    continue
+  fi
+  for label in "${labels[@]}"; do
+    if has_label 'paritytech/substrate' "$pr_id" "$label"; then
+      labelled_changes="$labelled_changes
+$line"
+    fi
+  done
+done <<< "$all_changes"
+
+
+release_text="Substrate $version
+-----------------
+$labelled_changes"
+
+echo "[+] Release text generated: "
+echo "$release_text"
+
+echo "[+] Pushing release to github"
+# Create release on github
+release_name="Substrate $version"
+data=$(jq -Rs --arg version "$version" \
+  --arg release_name "$release_name" \
+  --arg release_text "$release_text" \
+'{
+  "tag_name": $version,
+  "target_commitish": "master",
+  "name": $release_name,
+  "body": $release_text,
+  "draft": true,
+  "prerelease": false
+}' < /dev/null)
+
+out=$(curl -s -X POST --data "$data" -H "Authorization: token $GITHUB_RELEASE_TOKEN" "$api_base/paritytech/substrate/releases")
+
+html_url=$(echo "$out" | jq -r .html_url)
+
+if [ "$html_url" == "null" ]
+then
+  echo "[!] Something went wrong posting:"
+  echo "$out"
+else
+  echo "[+] Release draft created: $html_url"
+fi
+
+echo '[+] Sending draft release URL to Matrix'
+
+msg_body=$(cat <<EOF
+Release pipeline for Substrate $version complete.
+Draft release created: $html_url +EOF +) +send_message "$(structure_message "$msg_body" "$formatted_msg_body")" "$MATRIX_ROOM_ID" "$MATRIX_ACCESS_TOKEN" + +echo "[+] Done! Maybe the release worked..." diff --git a/Cargo.lock b/Cargo.lock index 960183fc3c6cd61bf3217dca28d2a7ac8eb6c2dc..6e1a59b2814088d0804a6cc34817427c80fdb957 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -61,9 +61,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "743ad5a418686aad3b87fd14c43badd828cf26e214a00f92a384291cf22e1811" +checksum = "d5e63fd144e18ba274ae7095c0197a870a7b9468abc801dd62f190d80817d2ec" dependencies = [ "memchr", ] @@ -167,7 +167,7 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0d0864d84b8e07b145449be9a8537db86bf9de5ce03b913214694643b4743502" dependencies = [ - "quote 1.0.2", + "quote", "syn", ] @@ -263,9 +263,9 @@ checksum = "f8aac770f1885fd7e387acedd76065302551364496e46b3dd00860b2f8359b9d" [[package]] name = "backtrace" -version = "0.3.43" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f80256bc78f67e7df7e36d77366f636ed976895d91fe2ab9efa3973e8fe8c4f" +checksum = "e4036b9bf40f3cf16aba72a3d65e8a520fc4bafcdc7079aea8f848c58c5b5536" dependencies = [ "backtrace-sys", "cfg-if", @@ -326,25 +326,26 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.49.4" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c07087f3d5731bf3fb375a81841b99597e25dc11bd3bc72d16d43adf6624a6e" +checksum = "99de13bb6361e01e493b3db7928085dcc474b7ba4f5481818e53a89d76b8393f" dependencies = [ "bitflags", "cexpr", "cfg-if", "clang-sys", "clap", - "env_logger 0.6.2", - "fxhash", + "env_logger 0.7.1", "lazy_static", + "lazycell", "log 0.4.8", "peeking_take_while", - "proc-macro2 0.4.30", - "quote 0.6.13", + "proc-macro2", + "quote", "regex", + "rustc-hash", "shlex", - "which 2.0.1", + "which", ] [[package]] @@ -443,29 +444,10 @@ dependencies = [ ] [[package]] -name = "browser-utils" -version = "0.8.0" -dependencies = [ - "chrono", - "clear_on_drop", - "console_error_panic_hook", - "console_log", - "futures 0.1.29", - "futures 0.3.4", - "futures-timer 3.0.1", - "js-sys", - "kvdb-web", - "libp2p", - "log 0.4.8", - "rand 0.6.5", - "rand 0.7.3", - "sc-chain-spec", - "sc-informant", - "sc-network", - "sc-service", - "wasm-bindgen", - "wasm-bindgen-futures", -] +name = "bs58" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c95ee6bba9d950218b6cc910cf62bc9e0a171d0f4537e3627b0f54d08549b188" [[package]] name = "bs58" @@ -612,7 +594,7 @@ dependencies = [ [[package]] name = "chain-spec-builder" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "ansi_term 0.12.1", "node-cli", @@ -631,7 +613,6 @@ dependencies = [ "js-sys", "num-integer", "num-traits", - "serde", "time", "wasm-bindgen", ] @@ -753,25 +734,23 @@ checksum = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b" [[package]] name = "cranelift-bforest" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd0f53d59dc9ab1c8ab68c991d8406b52b7a0aab0b15b05a3a6895579c4e5dd9" +version = "0.59.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "cranelift-entity", ] [[package]] name = "cranelift-codegen" -version = "0.58.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0381a794836fb994c47006465d46d46be072483b667f36013d993b9895117fee" +version = "0.59.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "byteorder 1.3.4", "cranelift-bforest", "cranelift-codegen-meta", "cranelift-codegen-shared", "cranelift-entity", - "gimli 0.20.0", + "gimli", "log 0.4.8", "serde", "smallvec 1.2.0", @@ -781,9 +760,8 @@ dependencies = [ [[package]] name = "cranelift-codegen-meta" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "208c3c8d82bfef32a534c5020c6cfc3bc92f41388f1246b7bb98cf543331abaa" +version = "0.59.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "cranelift-codegen-shared", "cranelift-entity", @@ -791,24 +769,21 @@ dependencies = [ [[package]] name = "cranelift-codegen-shared" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea048c456a517e56fd6df8f0e3947922897e6e6f61fbc5eb557a36c7b8ff6394" +version = "0.59.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" [[package]] name = "cranelift-entity" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8c7ed50812194c9e9de1fa39c77b39fc9ab48173d5e7ee88b25b6a8953e9b8" +version = "0.59.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "serde", ] [[package]] name = "cranelift-frontend" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21ceb931d9f919731df1b1ecdc716b5c66384b413a7f95909d1f45441ab9bef5" +version = "0.59.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "cranelift-codegen", "log 0.4.8", @@ -818,9 +793,8 @@ dependencies = [ [[package]] name = "cranelift-native" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "564ee82268bc25b914fcf331edfc2452f2d9ca34f976b187b4ca668beba250c8" +version = "0.59.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "cranelift-codegen", "raw-cpuid", @@ -829,9 +803,8 @@ dependencies = [ [[package]] name = "cranelift-wasm" -version = "0.58.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de63e2271b374be5b07f359184e2126a08fb24d24a740cbc178b7e0107ddafa5" +version = "0.59.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "cranelift-codegen", "cranelift-entity", @@ -839,7 +812,7 @@ dependencies = [ "log 0.4.8", "serde", "thiserror", - "wasmparser 0.48.2", + "wasmparser", ] [[package]] @@ -926,33 +899,36 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acec9a3b0b3559f15aee4f90746c4e5e293b701c0f7d3925d24e01645267b68c" +checksum = "cced8691919c02aac3cb0a1bc2e9b73d89e832bf9a06fc579d4e71b68a2da061" dependencies = [ "crossbeam-utils", + "maybe-uninit", ] [[package]] name = "crossbeam-deque" -version = "0.7.2" +version = 
"0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3aa945d63861bfe624b55d153a39684da1e8c0bc8fba932f7ee3a3c16cea3ca" +checksum = "9f02af974daeee82218205558e51ec8768b48cf524bd01d550abe5573a608285" dependencies = [ "crossbeam-epoch", "crossbeam-utils", + "maybe-uninit", ] [[package]] name = "crossbeam-epoch" -version = "0.8.0" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5064ebdbf05ce3cb95e45c8b086f72263f4166b29b97f6baff7ef7fe047b55ac" +checksum = "058ed274caafc1f60c4997b5fc07bf7dc7cca454af7c6e81edffe5f33f70dace" dependencies = [ - "autocfg 0.1.7", + "autocfg 1.0.0", "cfg-if", "crossbeam-utils", "lazy_static", + "maybe-uninit", "memoffset", "scopeguard", ] @@ -969,11 +945,11 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce446db02cdc3165b94ae73111e570793400d0794e46125cc4056c81cbb039f4" +checksum = "c3c7c73a2d1e9fc0886a08b93e98eb643461230d5f1925e4036204d5f2e261a8" dependencies = [ - "autocfg 0.1.7", + "autocfg 1.0.0", "cfg-if", "lazy_static", ] @@ -1009,9 +985,9 @@ dependencies = [ [[package]] name = "csv-core" -version = "0.1.6" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b5cadb6b25c77aeff80ba701712494213f4a8418fcda2ee11b6560c3ad0bf4c" +checksum = "2b2466559f260f48ad25fe6317b3c8dac77b5bdb5763ac7d9d6103530663bc90" dependencies = [ "memchr", ] @@ -1027,11 +1003,11 @@ dependencies = [ [[package]] name = "ctor" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd8ce37ad4184ab2ce004c33bf6379185d3b1c95801cab51026bd271bf68eedc" +checksum = "47c5e5ac752e18207b12e16b10631ae5f7f68f8805f335f9b817ead83d9ffce1" dependencies = [ - "quote 1.0.2", + "quote", "syn", ] @@ -1083,18 +1059,18 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.1.2" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f47ca1860a761136924ddd2422ba77b2ea54fe8cc75b9040804a0d9d32ad97" +checksum = "11c0346158a19b3627234e15596f5e465c360fcdb97d817bcb255e0510f5a788" [[package]] name = "derive_more" -version = "0.99.2" +version = "0.99.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2159be042979966de68315bce7034bb000c775f22e3e834e1c52ff78f041cae8" +checksum = "a806e96c59a76a5ba6e18735b6cf833344671e61e7863f2edb5c518ea2cac95c" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -1184,8 +1160,8 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecf634c5213044b8d54a46dd282cf5dd1f86bb5cb53e92c409cb4680a7fb9894" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -1263,6 +1239,33 @@ dependencies = [ "serde_json", ] +[[package]] +name = "ethbloom" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32cfe1c169414b709cf28aa30c74060bdb830a03a8ba473314d079ac79d80a5f" +dependencies = [ + "crunchy", + "fixed-hash", + "impl-rlp", + "impl-serde 0.2.3", + "tiny-keccak 1.5.0", +] + +[[package]] +name = "ethereum-types" +version = "0.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba744248e3553a393143d5ebb68939fc3a4ec0c22a269682535f5ffe7fed728c" +dependencies = [ + "ethbloom", + "fixed-hash", + "impl-rlp", + 
"impl-serde 0.2.3", + "primitive-types", + "uint", +] + [[package]] name = "evm" version = "0.15.0" @@ -1336,9 +1339,9 @@ dependencies = [ [[package]] name = "failure" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8273f13c977665c5db7eb2b99ae520952fe5ac831ae4cd09d80c4c7042b5ed9" +checksum = "b8529c2421efa3066a5cbd8063d2244603824daccb6936b079010bb2aa89464b" dependencies = [ "backtrace", "failure_derive", @@ -1346,12 +1349,12 @@ dependencies = [ [[package]] name = "failure_derive" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bc225b78e0391e4b8683440bf2e63c2deeeb2ce5189eab46e2b68c6d3725d08" +checksum = "030a733c8287d6213886dd487564ff5c8f6aae10278b3588ed177f9d18f8d231" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", "synstructure", ] @@ -1457,24 +1460,28 @@ checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" [[package]] name = "fork-tree" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", ] [[package]] name = "frame-benchmarking" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ + "frame-support", + "frame-system", "parity-scale-codec", "sp-api", + "sp-io", + "sp-runtime", "sp-runtime-interface", "sp-std", ] [[package]] name = "frame-benchmarking-cli" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-benchmarking", "linregress", @@ -1490,7 +1497,7 @@ dependencies = [ [[package]] name = "frame-executive" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -1508,7 +1515,7 @@ dependencies = [ [[package]] name = "frame-metadata" -version = "11.0.0" +version = "11.0.0-alpha.3" dependencies = [ "parity-scale-codec", "serde", @@ -1518,7 +1525,7 @@ dependencies = [ [[package]] name = "frame-support" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "bitmask", "frame-metadata", @@ -1543,37 +1550,37 @@ dependencies = [ [[package]] name = "frame-support-procedural" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support-procedural-tools", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] [[package]] name = "frame-support-procedural-tools" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support-procedural-tools-derive", "proc-macro-crate", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] [[package]] name = "frame-support-procedural-tools-derive" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] [[package]] name = "frame-support-test" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "frame-support", "parity-scale-codec", @@ -1589,7 +1596,7 @@ dependencies = [ [[package]] name = "frame-system" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "criterion 0.2.11", "frame-support", @@ -1607,7 +1614,7 @@ dependencies = [ [[package]] name = "frame-system-rpc-runtime-api" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "sp-api", @@ -1766,8 +1773,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9a5081aa3de1f7542a794a397cde100ed903b0630152d0973479018fd85423a7" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -1791,9 +1798,9 @@ checksum = 
"a1de7508b218029b0f01662ed8f61b1c964b3ae99d6f25462d0f55a595109df6" [[package]] name = "futures-timer" -version = "3.0.1" +version = "3.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3de1a2b2a2a33d9e60e17980b60ee061eeaae96a5abe9121db0fdb9af167a1c5" +checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" dependencies = [ "gloo-timers", "send_wrapper 0.4.0", @@ -1844,15 +1851,6 @@ dependencies = [ "pin-project", ] -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder 1.3.4", -] - [[package]] name = "gcc" version = "0.3.55" @@ -1902,27 +1900,18 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "gimli" -version = "0.19.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162d18ae5f2e3b90a993d202f1ba17a5633c2484426f8bcae201f86194bacd00" -dependencies = [ - "arrayvec 0.4.12", - "byteorder 1.3.4", - "fallible-iterator", - "indexmap", - "stable_deref_trait", -] - [[package]] name = "gimli" version = "0.20.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81dd6190aad0f05ddbbf3245c54ed14ca4aa6dd32f22312b70d8f168c3e3e633" dependencies = [ + "arrayvec 0.5.1", "byteorder 1.3.4", + "fallible-iterator", "indexmap", + "smallvec 1.2.0", + "stable_deref_trait", ] [[package]] @@ -1974,34 +1963,6 @@ dependencies = [ "scroll", ] -[[package]] -name = "grafana-data-source" -version = "0.8.0" -dependencies = [ - "async-std", - "chrono", - "derive_more", - "futures-timer 3.0.1", - "futures-util", - "hyper 0.13.2", - "lazy_static", - "log 0.4.8", - "parking_lot 0.10.0", - "serde", - "serde_json", - "tokio 0.2.11", -] - -[[package]] -name = "grafana-data-source-test" -version = "2.0.0" -dependencies = [ - "futures 0.3.4", - "futures-timer 3.0.1", - "grafana-data-source", - "rand 0.7.3", -] - [[package]] name = "h2" version = "0.1.26" @@ -2035,7 +1996,7 @@ dependencies = [ "indexmap", "log 0.4.8", "slab", - "tokio 0.2.11", + "tokio 0.2.12", "tokio-util", ] @@ -2054,12 +2015,6 @@ dependencies = [ "crunchy", ] -[[package]] -name = "hashbrown" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1de41fb8dba9714efd92241565cdff73f78508c95697dd56787d3cba27e2353" - [[package]] name = "hashbrown" version = "0.6.3" @@ -2081,18 +2036,18 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eff2656d88f158ce120947499e971d743c05dbcbed62e5bd2f38f1698bbc3772" +checksum = "1010591b26bbfe835e9faeabeb11866061cc7dcebffd56ad7d0942d0e61aefd8" dependencies = [ "libc", ] [[package]] name = "hex" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76cdda6bf525062a0c9e8f14ee2b37935c86b8efb6c8b69b3c83dfb518a914af" +checksum = "644f9158b2f133fd50f5fb3242878846d9eb792e445c893805ff0e3824006e35" [[package]] name = "hex-literal" @@ -2261,7 +2216,7 @@ dependencies = [ "net2", "pin-project", "time", - "tokio 0.2.11", + "tokio 0.2.12", "tower-service", "want 0.3.0", ] @@ -2278,7 +2233,7 @@ dependencies = [ "hyper 0.13.2", "rustls", "rustls-native-certs", - "tokio 0.2.11", + "tokio 0.2.12", "tokio-rustls", "webpki", ] @@ -2360,8 +2315,8 @@ version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7ef5550a42e3740a0e71f909d4c861056a284060af885ae7aa6242820f920d9d" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -2483,8 +2438,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8609af8f63b626e8e211f52441fcdb6ec54f1a446606b10d5c89ae9bf8a20058" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -2665,9 +2620,9 @@ checksum = "3576a87f2ba00f6f106fdfcd16db1d698d648a26ad8e0573cad8537c3c362d2a" [[package]] name = "libc" -version = "0.2.66" +version = "0.2.67" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d515b1f41455adea1313a4a2ac8a8a477634fbae63cc6100e3aebb207ce61558" +checksum = "eb147597cdf94ed43ab7a9038716637d2d1bf2bc571da995d0028dec06bd3018" [[package]] name = "libloading" @@ -2687,9 +2642,9 @@ checksum = "7fc7aa29613bd6a620df431842069224d8bc9011086b1db4c0e0cd47fa03ec9a" [[package]] name = "libp2p" -version = "0.16.0" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a58becf0b9585fcfbb8215bbe6e6ac187fcc180fd1026925ca180c845aa5a6e8" +checksum = "bba17ee9cac4bb89de5812159877d9b4f0a993bf41697a5a875940cd1eb71f24" dependencies = [ "bytes 0.5.4", "futures 0.3.4", @@ -2715,8 +2670,8 @@ dependencies = [ "libp2p-wasm-ext", "libp2p-websocket", "libp2p-yamux", - "parity-multiaddr", - "parity-multihash", + "parity-multiaddr 0.7.2", + "parity-multihash 0.2.3", "parking_lot 0.10.0", "pin-project", "smallvec 1.2.0", @@ -2730,17 +2685,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b874594c4b29de1a29f27871feba8e6cd13aa54a8a1e8f8c7cf3dfac5ca287c" dependencies = [ "asn1_der", - "bs58", + "bs58 0.3.0", "ed25519-dalek", "fnv", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "lazy_static", "libsecp256k1", "log 0.4.8", "multistream-select", - "parity-multiaddr", - "parity-multihash", + "parity-multiaddr 0.7.2", + "parity-multihash 0.2.3", "parking_lot 0.10.0", "pin-project", "prost", @@ -2751,7 +2706,7 @@ dependencies = [ "sha2", "smallvec 1.2.0", "thiserror", - "unsigned-varint", + "unsigned-varint 0.3.1", "void", "zeroize 1.1.0", ] @@ -2762,7 +2717,7 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d472e9d522f588805c77801de10b957be84e10f019ca5f869fa1825b15ea9b" dependencies = [ - "quote 1.0.2", + "quote", "syn", ] @@ -2820,13 +2775,13 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.8", - "lru 0.4.3", + "lru", "prost", "prost-build", "rand 0.7.3", "sha2", "smallvec 1.2.0", - "unsigned-varint", + "unsigned-varint 0.3.1", "wasm-timer", ] @@ -2848,9 +2803,9 @@ dependencies = [ [[package]] name = "libp2p-kad" -version = "0.16.0" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2efcff2af085e8181c421f68fe9c2b0a067379d146731925b3ac8f8e605c458" +checksum = "464dc8412978d40f0286be72ed9ab5e0e1386a4a06e7f174526739b5c3c1f041" dependencies = [ "arrayvec 0.5.1", "bytes 0.5.4", @@ -2861,14 +2816,14 @@ dependencies = [ "libp2p-core", "libp2p-swarm", "log 0.4.8", - "parity-multihash", + "parity-multihash 0.2.3", "prost", "prost-build", "rand 0.7.3", "sha2", "smallvec 1.2.0", "uint", - "unsigned-varint", + "unsigned-varint 0.3.1", "void", "wasm-timer", ] @@ -2908,16 +2863,16 @@ dependencies = [ "libp2p-core", "log 0.4.8", "parking_lot 0.10.0", - "unsigned-varint", + "unsigned-varint 0.3.1", ] [[package]] name = "libp2p-noise" -version = 
"0.16.0" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac7d33809afdf6794f09fdb2f9f94e1550ae230be5bae6430a078eb96fc9e5a6" +checksum = "b15a8a3d71f898beb6f854c8aae27aa1d198e0d1f2e49412261c2d90ef39675a" dependencies = [ - "curve25519-dalek 1.2.3", + "curve25519-dalek 2.0.0", "futures 0.3.4", "lazy_static", "libp2p-core", @@ -2928,7 +2883,7 @@ dependencies = [ "sha2", "snow", "static_assertions", - "x25519-dalek 0.5.2", + "x25519-dalek", "zeroize 1.1.0", ] @@ -2961,7 +2916,7 @@ dependencies = [ "prost", "prost-build", "rw-stream-sink", - "unsigned-varint", + "unsigned-varint 0.3.1", "void", ] @@ -2981,9 +2936,9 @@ dependencies = [ [[package]] name = "libp2p-secio" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec00eb9a3404ed76a0e14f637edcaa7f2b4a27a16884da4a56f2f21e166c2843" +checksum = "1219e9ecb4945d7331a05f5ffe96a1f6e28051bfa1223d4c60353c251de0354e" dependencies = [ "aes-ctr", "ctr", @@ -3011,9 +2966,9 @@ dependencies = [ [[package]] name = "libp2p-swarm" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1e9f4fb84a4bfe3d3a361c1fbcd4af017ba68f0a46a77bfbcc48bf8a456d6ef" +checksum = "275471e7c0e88ae004660866cd54f603bd8bd1f4caef541a27f50dd8640c4d4c" dependencies = [ "futures 0.3.4", "libp2p-core", @@ -3031,7 +2986,7 @@ checksum = "f9e80ad4e3535345f3d666554ce347d3100453775611c05c60786bf9a1747a10" dependencies = [ "async-std", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "get_if_addrs", "ipnet", "libp2p-core", @@ -3052,9 +3007,9 @@ dependencies = [ [[package]] name = "libp2p-wasm-ext" -version = "0.16.0" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39703653caa36f4afd0def39cc49a3ac0fa1d4289ca1802e417af03e4f5ef950" +checksum = "923581c055bc4b8c5f42d4ce5ef43e52fe5216f1ea4bc26476cb8a966ce6220b" dependencies = [ "futures 0.3.4", "js-sys", @@ -3087,9 +3042,9 @@ dependencies = [ [[package]] name = "libp2p-yamux" -version = "0.16.0" +version = "0.16.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f72aa5a7273c29c6eaea09108a49feaefc7456164863f64f86a193f9e78a4b7f" +checksum = "9dac30de24ccde0e67f363d71a125c587bbe6589503f664947e9b084b68a34f1" dependencies = [ "futures 0.3.4", "libp2p-core", @@ -3100,9 +3055,9 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "6.2.4" +version = "6.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a0785e816e1e11e7599388a492c61ef80ddc2afc91e313e61662cce537809be" +checksum = "4e3b727e2dd20ec2fb7ed93f23d9fd5328a0871185485ebdaff007b47d3e27e4" dependencies = [ "bindgen", "cc", @@ -3191,22 +3146,13 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "lru" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8f669d42c72d18514dfca8115689c5f6370a17d980cb5bd777a67f404594c8" -dependencies = [ - "hashbrown 0.5.0", -] - [[package]] name = "lru" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609345ddee5badacf857d4f547e0e5a2e987db77085c24cd887f73573a04237" dependencies = [ - "hashbrown 0.6.3", + "hashbrown", ] [[package]] @@ -3241,12 +3187,9 @@ checksum = "60302e4db3a61da70c0cb7991976248362f30319e88850c487b9b95bbf059e00" [[package]] name = "memchr" -version = "2.3.0" +version = "2.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "3197e20c7edb283f87c071ddfc7a2cca8f8e0b888c242959846a6fce03c72223" -dependencies = [ - "libc", -] +checksum = "3728d817d99e5ac407411fa471ff9800a778d88a24685968b36824eaf4bee400" [[package]] name = "memoffset" @@ -3265,7 +3208,7 @@ checksum = "198831fe8722331a395bc199a5d08efbc197497ef354cb4c77b969c02ffc0fc4" dependencies = [ "ahash", "hash-db", - "hashbrown 0.6.3", + "hashbrown", "parity-util-mem", ] @@ -3382,7 +3325,7 @@ dependencies = [ "log 0.4.8", "smallvec 1.2.0", "tokio-io", - "unsigned-varint", + "unsigned-varint 0.3.1", ] [[package]] @@ -3455,10 +3398,9 @@ dependencies = [ [[package]] name = "node-cli" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "assert_cmd", - "browser-utils", "frame-benchmarking-cli", "frame-support", "frame-system", @@ -3502,6 +3444,7 @@ dependencies = [ "sc-tracing", "sc-transaction-pool", "serde", + "serde_json", "sp-authority-discovery", "sp-consensus", "sp-consensus-babe", @@ -3515,6 +3458,7 @@ dependencies = [ "sp-timestamp", "sp-transaction-pool", "structopt", + "substrate-browser-utils", "substrate-build-script-utils", "tempfile", "tracing", @@ -3525,7 +3469,7 @@ dependencies = [ [[package]] name = "node-executor" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "criterion 0.3.1", "frame-benchmarking", @@ -3558,7 +3502,7 @@ dependencies = [ [[package]] name = "node-inspect" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "log 0.4.8", @@ -3574,7 +3518,7 @@ dependencies = [ [[package]] name = "node-primitives" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "pretty_assertions", "sp-core", @@ -3584,7 +3528,7 @@ dependencies = [ [[package]] name = "node-rpc" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "jsonrpc-core", "node-primitives", @@ -3607,7 +3551,7 @@ dependencies = [ [[package]] name = "node-rpc-client" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "env_logger 0.7.1", "futures 0.1.29", @@ -3620,7 +3564,7 @@ dependencies = [ [[package]] name = "node-runtime" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-benchmarking", "frame-executive", @@ -3682,7 +3626,7 @@ dependencies = [ [[package]] name = "node-template" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "futures 0.3.4", "log 0.4.8", @@ -3690,6 +3634,7 @@ dependencies = [ "sc-basic-authorship", "sc-cli", "sc-client", + "sc-client-api", "sc-consensus-aura", "sc-executor", "sc-finality-grandpa", @@ -3710,7 +3655,7 @@ dependencies = [ [[package]] name = "node-template-runtime" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-executive", "frame-support", @@ -3718,7 +3663,6 @@ dependencies = [ "pallet-aura", "pallet-balances", "pallet-grandpa", - "pallet-indices", "pallet-randomness-collective-flip", "pallet-sudo", "pallet-template", @@ -3743,7 +3687,7 @@ dependencies = [ [[package]] name = "node-testing" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "criterion 0.3.1", "frame-support", @@ -3782,13 +3726,13 @@ dependencies = [ "sp-runtime", "sp-timestamp", "substrate-test-client", - "tempdir", + "tempfile", "wabt", ] [[package]] name = "node-transaction-factory" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "log 0.4.8", "parity-scale-codec", @@ -3888,6 +3832,20 @@ dependencies = [ "libc", ] +[[package]] +name = "object" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea44a4fd660ab0f38434934ca0212e90fbeaaee54126ef20a3451c30c95bafae" 
+dependencies = [ + "flate2", + "goblin", + "parity-wasm 0.41.0", + "scroll", + "target-lexicon", + "uuid", +] + [[package]] name = "ole32-sys" version = "0.2.0" @@ -3963,16 +3921,16 @@ dependencies = [ [[package]] name = "owning_ref" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49a4b8ea2179e6a2e27411d3bca09ca6dd630821cf6894c6c7c8467a8ee7ef13" +checksum = "6ff55baddef9e4ad00f88b6c743a2a8062d4c6ade126c2a528644b8e444d52ce" dependencies = [ "stable_deref_trait", ] [[package]] name = "pallet-assets" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -3986,7 +3944,7 @@ dependencies = [ [[package]] name = "pallet-aura" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4008,7 +3966,7 @@ dependencies = [ [[package]] name = "pallet-authority-discovery" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4026,7 +3984,7 @@ dependencies = [ [[package]] name = "pallet-authorship" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4042,7 +4000,7 @@ dependencies = [ [[package]] name = "pallet-babe" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4067,7 +4025,7 @@ dependencies = [ [[package]] name = "pallet-balances" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-benchmarking", "frame-support", @@ -4081,9 +4039,23 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-benchmark" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-benchmarking", + "frame-support", + "frame-system", + "parity-scale-codec", + "serde", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-collective" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4099,7 +4071,7 @@ dependencies = [ [[package]] name = "pallet-contracts" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "assert_matches", "frame-support", @@ -4124,7 +4096,7 @@ dependencies = [ [[package]] name = "pallet-contracts-primitives" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -4133,7 +4105,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4152,7 +4124,7 @@ dependencies = [ [[package]] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "pallet-contracts-primitives", "parity-scale-codec", @@ -4163,7 +4135,7 @@ dependencies = [ [[package]] name = "pallet-democracy" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4180,7 +4152,7 @@ dependencies = [ [[package]] name = "pallet-elections" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4196,7 +4168,7 @@ dependencies = [ [[package]] name = "pallet-elections-phragmen" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4214,7 +4186,7 @@ dependencies = [ [[package]] name = "pallet-evm" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "evm", "frame-support", @@ -4234,8 +4206,9 @@ dependencies = [ [[package]] name = "pallet-example" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ + "frame-benchmarking", 
"frame-support", "frame-system", "pallet-balances", @@ -4247,9 +4220,24 @@ dependencies = [ "sp-std", ] +[[package]] +name = "pallet-example-offchain-worker" +version = "2.0.0-alpha.3" +dependencies = [ + "frame-support", + "frame-system", + "parity-scale-codec", + "serde", + "serde_json", + "sp-core", + "sp-io", + "sp-runtime", + "sp-std", +] + [[package]] name = "pallet-finality-tracker" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4266,7 +4254,7 @@ dependencies = [ [[package]] name = "pallet-generic-asset" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4280,7 +4268,7 @@ dependencies = [ [[package]] name = "pallet-grandpa" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4298,7 +4286,7 @@ dependencies = [ [[package]] name = "pallet-identity" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "enumflags2", "frame-benchmarking", @@ -4315,7 +4303,7 @@ dependencies = [ [[package]] name = "pallet-im-online" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4333,7 +4321,7 @@ dependencies = [ [[package]] name = "pallet-indices" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4349,7 +4337,7 @@ dependencies = [ [[package]] name = "pallet-membership" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4363,7 +4351,7 @@ dependencies = [ [[package]] name = "pallet-nicks" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4378,7 +4366,7 @@ dependencies = [ [[package]] name = "pallet-offences" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4394,7 +4382,7 @@ dependencies = [ [[package]] name = "pallet-randomness-collective-flip" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4408,7 +4396,7 @@ dependencies = [ [[package]] name = "pallet-recovery" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "enumflags2", "frame-support", @@ -4424,7 +4412,7 @@ dependencies = [ [[package]] name = "pallet-scored-pool" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4439,7 +4427,7 @@ dependencies = [ [[package]] name = "pallet-session" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4459,7 +4447,7 @@ dependencies = [ [[package]] name = "pallet-society" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4475,7 +4463,7 @@ dependencies = [ [[package]] name = "pallet-staking" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4498,18 +4486,18 @@ dependencies = [ [[package]] name = "pallet-staking-reward-curve" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "sp-runtime", "syn", ] [[package]] name = "pallet-sudo" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4523,7 +4511,7 @@ dependencies = [ [[package]] name = "pallet-template" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4536,7 +4524,7 @@ dependencies = [ [[package]] name = 
"pallet-timestamp" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-benchmarking", "frame-support", @@ -4554,7 +4542,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4569,7 +4557,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "jsonrpc-core", "jsonrpc-core-client", @@ -4586,7 +4574,7 @@ dependencies = [ [[package]] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "parity-scale-codec", @@ -4599,7 +4587,7 @@ dependencies = [ [[package]] name = "pallet-treasury" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4614,7 +4602,7 @@ dependencies = [ [[package]] name = "pallet-utility" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -4629,7 +4617,7 @@ dependencies = [ [[package]] name = "pallet-vesting" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "enumflags2", "frame-support", @@ -4651,6 +4639,24 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c276d76c5333b8c2579e02d49a06733a55b8282d2d9b13e8d53b6406bd7e30a" +[[package]] +name = "parity-multiaddr" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "045b3c7af871285146300da35b1932bb6e4639b66c7c98e85d06a32cbc4e8fa7" +dependencies = [ + "arrayref", + "bs58 0.2.5", + "byteorder 1.3.4", + "bytes 0.4.12", + "data-encoding", + "parity-multihash 0.1.3", + "percent-encoding 1.0.1", + "serde", + "unsigned-varint 0.2.3", + "url 1.7.2", +] + [[package]] name = "parity-multiaddr" version = "0.7.2" @@ -4658,17 +4664,32 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "26df883298bc3f4e92528b4c5cc9f806b791955b136da3e5e939ed9de0fd958b" dependencies = [ "arrayref", - "bs58", + "bs58 0.3.0", "byteorder 1.3.4", "data-encoding", - "parity-multihash", + "parity-multihash 0.2.3", "percent-encoding 2.1.0", "serde", "static_assertions", - "unsigned-varint", + "unsigned-varint 0.3.1", "url 2.1.1", ] +[[package]] +name = "parity-multihash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df3a17dc27848fd99e4f87eb0f8c9baba6ede0a6d555400c850ca45254ef4ce3" +dependencies = [ + "blake2", + "bytes 0.4.12", + "rand 0.6.5", + "sha-1", + "sha2", + "sha3", + "unsigned-varint 0.2.3", +] + [[package]] name = "parity-multihash" version = "0.2.3" @@ -4681,14 +4702,14 @@ dependencies = [ "sha-1", "sha2", "sha3", - "unsigned-varint", + "unsigned-varint 0.3.1", ] [[package]] name = "parity-scale-codec" -version = "1.1.2" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f747c06d9f3b2ad387ac881b9667298c81b1243aa9833f086e05996937c35507" +checksum = "f509c5e67ca0605ee17dcd3f91ef41cadd685c75a298fb6261b781a5acb3f910" dependencies = [ "arrayvec 0.5.1", "bitvec", @@ -4699,13 +4720,13 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "1.1.1" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34e513ff3e406f3ede6796dcdc83d0b32ffb86668cea1ccf7363118abeb00476" +checksum = "5a0ec292e92e8ec7c58e576adacc1e3f399c597c8f263c42f18420abe58e7245" dependencies = [ "proc-macro-crate", - "proc-macro2 
1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -4722,7 +4743,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef1476e40bf8f5c6776e9600983435821ca86eb9819d74a6207cca69d091406a" dependencies = [ "cfg-if", + "ethereum-types", + "hashbrown", "impl-trait-for-tuples", + "lru", "parity-util-mem-derive", "parking_lot 0.10.0", "primitive-types", @@ -4736,7 +4760,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f557c32c6d268a07c921471619c0295f5efad3a0e76d4f97a05c091a51d110b2" dependencies = [ - "proc-macro2 1.0.8", + "proc-macro2", "syn", "synstructure", ] @@ -4808,9 +4832,9 @@ dependencies = [ [[package]] name = "paste" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "423a519e1c6e828f1e73b720f9d9ed2fa643dce8a7737fb43235ce0b41eeaa49" +checksum = "63e1afe738d71b1ebab5f1207c055054015427dbfc7bbe9ee1266894156ec046" dependencies = [ "paste-impl", "proc-macro-hack", @@ -4818,13 +4842,13 @@ dependencies = [ [[package]] name = "paste-impl" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4214c9e912ef61bf42b81ba9a47e8aad1b2ffaf739ab162bf96d1e011f54e6c5" +checksum = "6d4dc4a7f6f743211c5aab239640a65091535d97d43d92a52bca435a640892bb" dependencies = [ "proc-macro-hack", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -4887,8 +4911,8 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "385322a45f2ecf3410c68d2a549a4a2685e8051d0f278e39743ff4e451cb9b3f" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -4936,9 +4960,9 @@ checksum = "74490b50b9fbe561ac330df47c08f3f33073d2d00c150f719147d7c54522fa1b" [[package]] name = "predicates" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9bfe52247e5cc9b2f943682a85a5549fb9662245caf094504e69a2f03fe64d4" +checksum = "1188bf092c81c18228c383b190c069a8a613c18a046ffa9fdfc0f5fc8fb2da8a" dependencies = [ "difference", "predicates-core", @@ -4996,25 +5020,25 @@ dependencies = [ [[package]] name = "proc-macro-error" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875077759af22fa20b610ad4471d8155b321c89c3f2785526c9839b099be4e0a" +checksum = "052b3c9af39c7e5e94245f820530487d19eb285faedcb40e0c3275132293f242" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "rustversion", "syn", ] [[package]] name = "proc-macro-error-attr" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5717d9fa2664351a01ed73ba5ef6df09c01a521cb42cb65a061432a826f3c7a" +checksum = "d175bef481c7902e63e3165627123fff3502f06ac043d3ef42d08c1246da9253" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "rustversion", "syn", "syn-mid", @@ -5026,8 +5050,8 @@ version = "0.5.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ecd45702f76d6d3c75a80564378ae228a85f0b59d2f3ed43c91b4a69eb2ebfc5" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -5039,20 +5063,25 @@ checksum = "369a6ed065f249a159e06c45752c780bda2fb53c995718f9e484d08daa9eb42e" [[package]] name = "proc-macro2" -version = "0.4.30" +version = "1.0.9" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf3d2011ab5c909338f7887f4fc896d35932e29146c12c8d01da6b22a80ba759" +checksum = "6c09721c6781493a2a492a96b5a5bf19b65917fe6728884e7c44dd0c60ca3435" dependencies = [ - "unicode-xid 0.1.0", + "unicode-xid", ] [[package]] -name = "proc-macro2" -version = "1.0.8" +name = "prometheus" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acb317c6ff86a4e579dfa00fc5e6cca91ecbb4e7eb2df0468805b674eb88548" +checksum = "5567486d5778e2c6455b1b90ff1c558f29e751fc018130fa182e15828e728af1" dependencies = [ - "unicode-xid 0.2.0", + "cfg-if", + "fnv", + "lazy_static", + "protobuf", + "quick-error", + "spin", ] [[package]] @@ -5080,7 +5109,7 @@ dependencies = [ "prost", "prost-types", "tempfile", - "which 3.1.0", + "which", ] [[package]] @@ -5091,8 +5120,8 @@ checksum = "537aa19b95acde10a12fec4301466386f757403de4cd4e5b4fa78fb5ecb18f72" dependencies = [ "anyhow", "itertools", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -5106,6 +5135,12 @@ dependencies = [ "prost", ] +[[package]] +name = "protobuf" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6686ddd96a8dbe2687b5f2a687b2cfb520854010ec480f2d74c32e7c9873d3c5" + [[package]] name = "pwasm-utils" version = "0.12.0" @@ -5148,20 +5183,11 @@ dependencies = [ [[package]] name = "quote" -version = "0.6.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ce23b6b870e8f94f81fb0a363d65d86675884b34a09043c81e5562f11c1f8e1" -dependencies = [ - "proc-macro2 0.4.30", -] - -[[package]] -name = "quote" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe" +checksum = "2bdc6c187c65bca4260c9011c9e3132efe4909da44726bad24cf7572ae338d7f" dependencies = [ - "proc-macro2 1.0.8", + "proc-macro2", ] [[package]] @@ -5587,8 +5613,8 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b3bba175698996010c4f6dce5e7f173b6eb781fce25d2cfc45e27091ce0b79f6" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -5655,13 +5681,13 @@ dependencies = [ [[package]] name = "sc-authority-discovery" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ - "bytes 0.4.12", + "bytes 0.5.4", "derive_more", "env_logger 0.7.1", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "libp2p", "log 0.4.8", "parity-scale-codec", @@ -5684,14 +5710,13 @@ dependencies = [ [[package]] name = "sc-basic-authorship" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "futures 0.3.4", "log 0.4.8", "parity-scale-codec", "parking_lot 0.10.0", "sc-block-builder", - "sc-client", "sc-client-api", "sc-telemetry", "sc-transaction-pool", @@ -5708,7 +5733,7 @@ dependencies = [ [[package]] name = "sc-block-builder" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "parity-scale-codec", "sc-client-api", @@ -5723,7 +5748,7 @@ dependencies = [ [[package]] name = "sc-chain-spec" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "impl-trait-for-tuples", "sc-chain-spec-derive", @@ -5737,17 +5762,17 @@ dependencies = [ [[package]] name = "sc-chain-spec-derive" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "proc-macro-crate", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] [[package]] name = "sc-cli" 
-version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "ansi_term 0.12.1", "app_dirs", @@ -5778,14 +5803,15 @@ dependencies = [ "sp-runtime", "sp-state-machine", "structopt", + "substrate-prometheus-endpoint", "tempfile", "time", - "tokio 0.2.11", + "tokio 0.2.12", ] [[package]] name = "sc-client" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "env_logger 0.7.1", @@ -5815,6 +5841,7 @@ dependencies = [ "sp-std", "sp-trie", "sp-version", + "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", "tracing", @@ -5822,7 +5849,7 @@ dependencies = [ [[package]] name = "sc-client-api" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "derive_more", "fnv", @@ -5845,6 +5872,7 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-std", + "sp-storage", "sp-test-primitives", "sp-transaction-pool", "sp-trie", @@ -5853,7 +5881,7 @@ dependencies = [ [[package]] name = "sc-client-db" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "env_logger 0.7.1", "hash-db", @@ -5878,19 +5906,19 @@ dependencies = [ "sp-runtime", "sp-state-machine", "sp-trie", + "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", ] [[package]] name = "sc-consensus-aura" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "env_logger 0.7.1", - "futures 0.1.29", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "log 0.4.8", "parity-scale-codec", "parking_lot 0.10.0", @@ -5918,19 +5946,17 @@ dependencies = [ "sp-version", "substrate-test-runtime-client", "tempfile", - "tokio 0.1.22", ] [[package]] name = "sc-consensus-babe" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "env_logger 0.7.1", "fork-tree", - "futures 0.1.29", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "log 0.4.8", "merlin", "num-bigint", @@ -5969,12 +5995,11 @@ dependencies = [ "sp-version", "substrate-test-runtime-client", "tempfile", - "tokio 0.1.22", ] [[package]] name = "sc-consensus-babe-rpc" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "futures 0.3.4", @@ -5999,7 +6024,7 @@ dependencies = [ [[package]] name = "sc-consensus-epochs" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "fork-tree", "parity-scale-codec", @@ -6011,7 +6036,7 @@ dependencies = [ [[package]] name = "sc-consensus-manual-seal" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "env_logger 0.7.1", @@ -6034,12 +6059,12 @@ dependencies = [ "substrate-test-runtime-client", "substrate-test-runtime-transaction-pool", "tempfile", - "tokio 0.2.11", + "tokio 0.2.12", ] [[package]] name = "sc-consensus-pow" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "futures 0.3.4", @@ -6059,10 +6084,10 @@ dependencies = [ [[package]] name = "sc-consensus-slots" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "log 0.4.8", "parity-scale-codec", "parking_lot 0.10.0", @@ -6080,7 +6105,7 @@ dependencies = [ [[package]] name = "sc-consensus-uncles" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "log 0.4.8", "sc-client-api", @@ -6093,7 +6118,7 @@ dependencies = [ [[package]] name = "sc-executor" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "assert_matches", "derive_more", @@ -6112,6 +6137,7 @@ dependencies = [ "sp-externalities", "sp-io", "sp-panic-handler", + "sp-runtime", "sp-runtime-interface", 
"sp-serializer", "sp-state-machine", @@ -6126,7 +6152,7 @@ dependencies = [ [[package]] name = "sc-executor-common" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "log 0.4.8", @@ -6141,7 +6167,7 @@ dependencies = [ [[package]] name = "sc-executor-wasmi" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "log 0.4.8", "parity-scale-codec", @@ -6156,37 +6182,37 @@ dependencies = [ [[package]] name = "sc-executor-wasmtime" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "assert_matches", "log 0.4.8", "parity-scale-codec", "parity-wasm 0.41.0", "sc-executor-common", + "scoped-tls", "sp-allocator", "sp-core", "sp-runtime-interface", "sp-wasm-interface", - "wasmi", "wasmtime", ] [[package]] name = "sc-finality-grandpa" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "assert_matches", "env_logger 0.7.1", "finality-grandpa", "fork-tree", - "futures 0.1.29", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "log 0.4.8", "parity-scale-codec", "parking_lot 0.10.0", "pin-project", "rand 0.7.3", + "sc-block-builder", "sc-client", "sc-client-api", "sc-keystore", @@ -6207,14 +6233,15 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-state-machine", + "substrate-prometheus-endpoint", "substrate-test-runtime-client", "tempfile", - "tokio 0.1.22", + "tokio 0.2.12", ] [[package]] name = "sc-informant" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "ansi_term 0.12.1", "futures 0.3.4", @@ -6230,7 +6257,7 @@ dependencies = [ [[package]] name = "sc-keystore" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "derive_more", "hex", @@ -6245,7 +6272,7 @@ dependencies = [ [[package]] name = "sc-network" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "assert_matches", "async-std", @@ -6258,16 +6285,17 @@ dependencies = [ "fnv", "fork-tree", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "futures_codec", "libp2p", "linked-hash-map", "linked_hash_set", "log 0.4.8", - "lru 0.4.3", + "lru", "nohash-hasher", "parity-scale-codec", "parking_lot 0.10.0", + "pin-project", "prost", "prost-build", "quickcheck", @@ -6290,12 +6318,12 @@ dependencies = [ "sp-keyring", "sp-runtime", "sp-test-primitives", - "substrate-test-client", + "substrate-prometheus-endpoint", "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", "thiserror", - "unsigned-varint", + "unsigned-varint 0.3.1", "void", "wasm-timer", "zeroize 1.1.0", @@ -6303,14 +6331,13 @@ dependencies = [ [[package]] name = "sc-network-gossip" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ - "futures 0.1.29", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "libp2p", "log 0.4.8", - "lru 0.1.17", + "lru", "parking_lot 0.10.0", "sc-network", "sp-runtime", @@ -6319,12 +6346,11 @@ dependencies = [ [[package]] name = "sc-network-test" -version = "0.8.0" +version = "0.8.0-dev" dependencies = [ "env_logger 0.7.1", - "futures 0.1.29", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "libp2p", "log 0.4.8", "parking_lot 0.10.0", @@ -6341,18 +6367,17 @@ dependencies = [ "substrate-test-runtime", "substrate-test-runtime-client", "tempfile", - "tokio 0.1.22", ] [[package]] name = "sc-offchain" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "bytes 0.5.4", "env_logger 0.7.1", "fnv", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "hyper 0.13.2", "hyper-rustls", "log 0.4.8", @@ -6372,12 +6397,12 @@ dependencies = [ "sp-transaction-pool", 
"substrate-test-runtime-client", "threadpool", - "tokio 0.2.11", + "tokio 0.2.12", ] [[package]] name = "sc-peerset" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "futures 0.3.4", "libp2p", @@ -6389,7 +6414,7 @@ dependencies = [ [[package]] name = "sc-rpc" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "assert_matches", "futures 0.1.29", @@ -6426,7 +6451,7 @@ dependencies = [ [[package]] name = "sc-rpc-api" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "futures 0.3.4", @@ -6448,7 +6473,7 @@ dependencies = [ [[package]] name = "sc-rpc-server" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "jsonrpc-core", "jsonrpc-http-server", @@ -6462,7 +6487,7 @@ dependencies = [ [[package]] name = "sc-runtime-test" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "sp-allocator", "sp-core", @@ -6475,18 +6500,17 @@ dependencies = [ [[package]] name = "sc-service" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "exit-future", "futures 0.1.29", "futures 0.3.4", "futures-diagnose", - "futures-timer 3.0.1", - "grafana-data-source", + "futures-timer 3.0.2", "lazy_static", "log 0.4.8", - "parity-multiaddr", + "parity-multiaddr 0.5.0", "parity-scale-codec", "parity-util-mem", "parking_lot 0.10.0", @@ -6518,17 +6542,17 @@ dependencies = [ "sp-runtime", "sp-session", "sp-transaction-pool", + "substrate-prometheus-endpoint", "substrate-test-runtime-client", "sysinfo", "target_info", - "tokio 0.2.11", "tracing", "wasm-timer", ] [[package]] name = "sc-service-test" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "env_logger 0.7.1", "fdlimit", @@ -6548,22 +6572,25 @@ dependencies = [ [[package]] name = "sc-state-db" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "env_logger 0.7.1", "log 0.4.8", "parity-scale-codec", + "parity-util-mem", + "parity-util-mem-derive", "parking_lot 0.10.0", + "sc-client-api", "sp-core", ] [[package]] name = "sc-telemetry" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "bytes 0.5.4", "futures 0.3.4", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "libp2p", "log 0.4.8", "parking_lot 0.10.0", @@ -6580,10 +6607,9 @@ dependencies = [ [[package]] name = "sc-tracing" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "erased-serde", - "grafana-data-source", "log 0.4.8", "parking_lot 0.10.0", "sc-telemetry", @@ -6596,7 +6622,7 @@ dependencies = [ [[package]] name = "sc-transaction-graph" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "assert_matches", "criterion 0.3.1", @@ -6618,7 +6644,7 @@ dependencies = [ [[package]] name = "sc-transaction-pool" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "derive_more", "futures 0.3.4", @@ -6669,10 +6695,16 @@ dependencies = [ ] [[package]] -name = "scopeguard" +name = "scoped-tls" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b42e15e59b18a828bbf5c58ea01debb36b9b096346de35d941dcb89009f24a0d" +checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" + +[[package]] +name = "scopeguard" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "scroll" @@ -6689,8 +6721,8 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8584eea9b9ff42825b46faf46a8c24d2cff13ec152fa2a50df788b87c07ee28" dependencies = [ - 
"proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -6783,16 +6815,16 @@ version = "1.0.104" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "128f9e303a5a29922045a830221b8f78ec74a5f544944f3d5984f8ec3895ef64" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] [[package]] name = "serde_json" -version = "1.0.47" +version = "1.0.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15913895b61e0be854afd32fd4163fcd2a3df34142cf2cb961b310ce694cbf90" +checksum = "9371ade75d4c2d6cb154141b9752cf3781ec9c05e0e5cf35060e1e70ee7b9c25" dependencies = [ "itoa", "ryu", @@ -6913,8 +6945,8 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a945ec7f7ce853e89ffa36be1e27dce9a43e82ff9093bf3461c30d5da74ed11b" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -6948,7 +6980,7 @@ dependencies = [ "rustc_version", "sha2", "subtle 2.2.2", - "x25519-dalek 0.6.0", + "x25519-dalek", ] [[package]] @@ -6979,7 +7011,7 @@ checksum = "4bf77cb82ba8453b42b6ae1d692e4cdc92f9a47beaf89a847c8be83f4e328ad3" [[package]] name = "sp-allocator" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "derive_more", "log 0.4.8", @@ -6990,7 +7022,7 @@ dependencies = [ [[package]] name = "sp-api" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "hash-db", "parity-scale-codec", @@ -7005,22 +7037,23 @@ dependencies = [ [[package]] name = "sp-api-proc-macro" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "blake2-rfc", "proc-macro-crate", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] [[package]] name = "sp-api-test" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "criterion 0.3.1", "parity-scale-codec", "rustversion", + "sc-block-builder", "sp-api", "sp-blockchain", "sp-consensus", @@ -7033,7 +7066,7 @@ dependencies = [ [[package]] name = "sp-application-crypto" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "serde", @@ -7044,7 +7077,7 @@ dependencies = [ [[package]] name = "sp-application-crypto-test" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "sp-api", "sp-application-crypto", @@ -7055,7 +7088,7 @@ dependencies = [ [[package]] name = "sp-arithmetic" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "criterion 0.3.1", "integer-sqrt", @@ -7070,7 +7103,7 @@ dependencies = [ [[package]] name = "sp-authority-discovery" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "sp-api", @@ -7081,7 +7114,7 @@ dependencies = [ [[package]] name = "sp-authorship" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7091,7 +7124,7 @@ dependencies = [ [[package]] name = "sp-block-builder" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "sp-api", @@ -7102,11 +7135,11 @@ dependencies = [ [[package]] name = "sp-blockchain" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "derive_more", "log 0.4.8", - "lru 0.4.3", + "lru", "parity-scale-codec", "parking_lot 0.10.0", "sp-block-builder", @@ -7117,12 +7150,12 @@ dependencies = [ [[package]] name = "sp-consensus" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "derive_more", "futures 0.3.4", "futures-diagnose", - "futures-timer 3.0.1", + "futures-timer 3.0.2", "libp2p", "log 0.4.8", "parity-scale-codec", @@ 
-7139,7 +7172,7 @@ dependencies = [ [[package]] name = "sp-consensus-aura" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "parity-scale-codec", "sp-api", @@ -7152,7 +7185,7 @@ dependencies = [ [[package]] name = "sp-consensus-babe" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "parity-scale-codec", "schnorrkel", @@ -7167,7 +7200,7 @@ dependencies = [ [[package]] name = "sp-consensus-pow" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "parity-scale-codec", "sp-api", @@ -7178,7 +7211,7 @@ dependencies = [ [[package]] name = "sp-core" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "base58", "blake2-rfc", @@ -7222,16 +7255,16 @@ dependencies = [ [[package]] name = "sp-debug-derive" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] [[package]] name = "sp-externalities" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "environmental", "sp-std", @@ -7240,7 +7273,7 @@ dependencies = [ [[package]] name = "sp-finality-grandpa" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "serde", @@ -7252,7 +7285,7 @@ dependencies = [ [[package]] name = "sp-finality-tracker" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "sp-inherents", @@ -7261,7 +7294,7 @@ dependencies = [ [[package]] name = "sp-inherents" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "derive_more", "parity-scale-codec", @@ -7272,7 +7305,7 @@ dependencies = [ [[package]] name = "sp-io" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "hash-db", "libsecp256k1", @@ -7289,7 +7322,7 @@ dependencies = [ [[package]] name = "sp-keyring" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "lazy_static", "sp-core", @@ -7299,7 +7332,7 @@ dependencies = [ [[package]] name = "sp-offchain" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "sp-api", "sp-runtime", @@ -7307,7 +7340,7 @@ dependencies = [ [[package]] name = "sp-panic-handler" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "backtrace", "log 0.4.8", @@ -7315,7 +7348,7 @@ dependencies = [ [[package]] name = "sp-phragmen" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "rand 0.7.3", "serde", @@ -7327,7 +7360,7 @@ dependencies = [ [[package]] name = "sp-rpc" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "serde", "serde_json", @@ -7336,8 +7369,9 @@ dependencies = [ [[package]] name = "sp-runtime" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ + "hash256-std-hasher", "impl-trait-for-tuples", "log 0.4.8", "parity-scale-codec", @@ -7356,7 +7390,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "primitive-types", @@ -7375,22 +7409,22 @@ dependencies = [ [[package]] name = "sp-runtime-interface-proc-macro" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "Inflector", "proc-macro-crate", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] [[package]] name = "sp-runtime-interface-test" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "sc-executor", - "sp-core", "sp-io", + "sp-runtime", "sp-runtime-interface", "sp-runtime-interface-test-wasm", "sp-state-machine", @@ -7398,7 +7432,7 @@ dependencies = [ [[package]] name = "sp-runtime-interface-test-wasm" -version = "2.0.0" +version = "2.0.0-dev" dependencies = 
[ "sp-core", "sp-io", @@ -7409,7 +7443,7 @@ dependencies = [ [[package]] name = "sp-sandbox" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "assert_matches", "parity-scale-codec", @@ -7423,7 +7457,7 @@ dependencies = [ [[package]] name = "sp-serializer" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "serde", "serde_json", @@ -7431,7 +7465,7 @@ dependencies = [ [[package]] name = "sp-session" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "sp-api", "sp-core", @@ -7441,7 +7475,7 @@ dependencies = [ [[package]] name = "sp-staking" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "parity-scale-codec", "sp-runtime", @@ -7450,7 +7484,7 @@ dependencies = [ [[package]] name = "sp-state-machine" -version = "0.8.0" +version = "0.8.0-alpha.3" dependencies = [ "hash-db", "hex-literal", @@ -7462,6 +7496,7 @@ dependencies = [ "sp-core", "sp-externalities", "sp-panic-handler", + "sp-runtime", "sp-trie", "trie-db", "trie-root", @@ -7469,11 +7504,11 @@ dependencies = [ [[package]] name = "sp-std" -version = "2.0.0" +version = "2.0.0-alpha.3" [[package]] name = "sp-storage" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "impl-serde 0.2.3", "serde", @@ -7483,7 +7518,7 @@ dependencies = [ [[package]] name = "sp-test-primitives" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "parity-scale-codec", "parity-util-mem", @@ -7495,7 +7530,7 @@ dependencies = [ [[package]] name = "sp-timestamp" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -7508,7 +7543,7 @@ dependencies = [ [[package]] name = "sp-transaction-pool" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "derive_more", "futures 0.3.4", @@ -7521,7 +7556,7 @@ dependencies = [ [[package]] name = "sp-trie" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "criterion 0.2.11", "hash-db", @@ -7529,6 +7564,7 @@ dependencies = [ "memory-db", "parity-scale-codec", "sp-core", + "sp-runtime", "sp-std", "trie-bench", "trie-db", @@ -7538,7 +7574,7 @@ dependencies = [ [[package]] name = "sp-version" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "impl-serde 0.2.3", "parity-scale-codec", @@ -7549,7 +7585,7 @@ dependencies = [ [[package]] name = "sp-wasm-interface" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "impl-trait-for-tuples", "parity-scale-codec", @@ -7636,8 +7672,8 @@ checksum = "095064aa1f5b94d14e635d0a5684cf140c43ae40a0fd990708d38f5d669e5f64" dependencies = [ "heck", "proc-macro-error", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -7657,14 +7693,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0054a7df764039a6cd8592b9de84be4bec368ff081d203a7d5371cbfa8e65c81" dependencies = [ "heck", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] [[package]] name = "subkey" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "clap", "derive_more", @@ -7704,13 +7740,38 @@ dependencies = [ "sha2", ] +[[package]] +name = "substrate-browser-utils" +version = "0.8.0-alpha.3" +dependencies = [ + "chrono", + "clear_on_drop", + "console_error_panic_hook", + "console_log", + "futures 0.1.29", + "futures 0.3.4", + "futures-timer 3.0.2", + "js-sys", + "kvdb-web", + "libp2p", + "log 0.4.8", + "rand 0.6.5", + "rand 0.7.3", + "sc-chain-spec", + "sc-informant", + "sc-network", + "sc-service", + "wasm-bindgen", + "wasm-bindgen-futures", +] + [[package]] name = 
"substrate-build-script-utils" -version = "2.0.0" +version = "2.0.0-alpha.3" [[package]] name = "substrate-frame-rpc-support" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "frame-support", "frame-system", @@ -7721,12 +7782,12 @@ dependencies = [ "sc-rpc-api", "serde", "sp-storage", - "tokio 0.1.22", + "tokio 0.2.12", ] [[package]] name = "substrate-frame-rpc-system" -version = "2.0.0" +version = "2.0.0-alpha.3" dependencies = [ "env_logger 0.7.1", "frame-system-rpc-runtime-api", @@ -7747,9 +7808,22 @@ dependencies = [ "substrate-test-runtime-client", ] +[[package]] +name = "substrate-prometheus-endpoint" +version = "0.8.0-alpha.3" +dependencies = [ + "async-std", + "derive_more", + "futures-util", + "hyper 0.13.2", + "log 0.4.8", + "prometheus", + "tokio 0.2.12", +] + [[package]] name = "substrate-test-client" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "futures 0.3.4", "hash-db", @@ -7768,7 +7842,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "cfg-if", "frame-executive", @@ -7809,7 +7883,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime-client" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "futures 0.3.4", "parity-scale-codec", @@ -7826,7 +7900,7 @@ dependencies = [ [[package]] name = "substrate-test-runtime-transaction-pool" -version = "2.0.0" +version = "2.0.0-dev" dependencies = [ "derive_more", "futures 0.3.4", @@ -7841,7 +7915,7 @@ dependencies = [ [[package]] name = "substrate-test-utils" -version = "2.0.0" +version = "2.0.0-alpha.3" [[package]] name = "substrate-wasm-builder" @@ -7851,6 +7925,7 @@ dependencies = [ "build-helper", "cargo_metadata", "fs2", + "itertools", "tempfile", "toml", "walkdir", @@ -7875,13 +7950,13 @@ checksum = "7c65d530b10ccaeac294f349038a597e435b18fb456aadd0840a623f83b9e941" [[package]] name = "syn" -version = "1.0.14" +version = "1.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af6f3550d8dff9ef7dc34d384ac6f107e5d31c8f57d9f28e0081503f547ac8f5" +checksum = "123bd9499cfb380418d509322d7a6d52e5315f064fe4b3ad18a53d6b92c07859" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", - "unicode-xid 0.2.0", + "proc-macro2", + "quote", + "unicode-xid", ] [[package]] @@ -7890,8 +7965,8 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7be3539f6c128a931cf19dcee741c1af532c7fd387baa739c03dd2e96479338a" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -7901,10 +7976,10 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "67656ea1dc1b41b1451851562ea232ec2e5a80242139f7e679ceccfb5d61f545" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", - "unicode-xid 0.2.0", + "unicode-xid", ] [[package]] @@ -7938,16 +8013,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c63f48baada5c52e65a29eef93ab4f8982681b67f9e8d29c7b05abcfec2b9ffe" -[[package]] -name = "tempdir" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8" -dependencies = [ - "rand 0.4.6", - "remove_dir_all", -] - [[package]] name = "tempfile" version = "3.1.0" @@ -7978,8 +8043,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"a605baa797821796a751f4a959e1206079b24a4b7e1ed302b7d785d81a9276c9" dependencies = [ "lazy_static", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", "version_check 0.9.1", ] @@ -7995,21 +8060,21 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "205684fd018ca14432b12cce6ea3d46763311a571c3d294e71ba3f01adcf1aad" +checksum = "ee14bf8e6767ab4c687c9e8bc003879e042a96fd67a3ba5934eadb6536bef4db" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57e4d2e50ca050ed44fb58309bdce3efa79948f84f9993ad1978de5eebdce5a7" +checksum = "a7b51e1fbc44b5a0840be594fbc0f960be09050f2617e61e6aa43bef97cd3ef4" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -8044,9 +8109,9 @@ dependencies = [ [[package]] name = "tiny-bip39" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd1fb03fe8e07d17cd851a624a9fff74642a997b67fbd1ccd77533241640d92" +checksum = "a6848cd8f566953ce1e8faeba12ee23cbdbb0437754792cd857d44628b5685e3" dependencies = [ "failure", "hmac", @@ -8055,6 +8120,7 @@ dependencies = [ "rand 0.7.3", "rustc-hash", "sha2", + "unicode-normalization", ] [[package]] @@ -8111,9 +8177,9 @@ dependencies = [ [[package]] name = "tokio" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fdd17989496f49cdc57978c96f0c9fe5e4a58a8bddc6813c449a4624f6a030b" +checksum = "b34bee1facdc352fba10c9c58b654e6ecb6a2250167772bf86071f7c5f2f5061" dependencies = [ "bytes 0.5.4", "fnv", @@ -8208,12 +8274,12 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4b1e7ed7d5d4c2af3d999904b0eebe76544897cdbfb2b9684bed2174ab20f7c" +checksum = "f0c3acc6aa564495a0f2e1d59fab677cd7f81a19994cfc7f3ad0e64301560389" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", ] @@ -8244,7 +8310,7 @@ checksum = "141afec0978abae6573065a48882c6bae44c5cc61db9b511ac4abf6a09bfd9cc" dependencies = [ "futures-core", "rustls", - "tokio 0.2.11", + "tokio 0.2.12", "webpki", ] @@ -8367,7 +8433,7 @@ dependencies = [ "futures-sink", "log 0.4.8", "pin-project-lite", - "tokio 0.2.11", + "tokio 0.2.12", ] [[package]] @@ -8387,9 +8453,9 @@ checksum = "e987b6bf443f4b5b3b6f38704195592cca41c5bb7aedd3c3693c7081f8289860" [[package]] name = "tracing" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e213bd24252abeb86a0b7060e02df677d367ce6cb772cef17e9214b8390a8d3" +checksum = "1721cc8cf7d770cc4257872507180f35a4797272f5962f24c806af9e7faf52ab" dependencies = [ "cfg-if", "tracing-attributes", @@ -8398,19 +8464,19 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cfd395def5a60236e187e1ff905cb55668a59f29928dec05e6e1b1fd2ac1f3" +checksum = "7fbad39da2f9af1cae3016339ad7f2c7a9e870f12e8fd04c4fd7ef35b30c0d2b" dependencies = [ - "quote 1.0.2", + "quote", "syn", ] [[package]] name = "tracing-core" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "13a46f11e372b8bd4b4398ea54353412fdd7fd42a8370c7e543e218cf7661978" +checksum = "0aa83a9a47081cd522c09c81b31aec2c9273424976f922ad61c053b58350b715" dependencies = [ "lazy_static", ] @@ -8450,7 +8516,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de9222c50cc325855621271157c973da27a0dcd26fa06f8edf81020bd2333df0" dependencies = [ "hash-db", - "hashbrown 0.6.3", + "hashbrown", "log 0.4.8", "rustc-hex", "smallvec 1.2.0", @@ -8483,9 +8549,9 @@ checksum = "e604eb7b43c06650e854be16a2a03155743d3752dd1c943f6829e26b7a36e382" [[package]] name = "trybuild" -version = "1.0.21" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f5b3f750c701725331ac78e389b5d143b7d25f6b6ffffd0d419759a9063ac5f" +checksum = "26ff1b18659a2218332848d76ad1c867ce4c6ee37b085e6bc8de9a6d11401220" dependencies = [ "glob 0.3.0", "lazy_static", @@ -8589,23 +8655,24 @@ checksum = "caaa9d531767d1ff2150b9332433f32a24622147e5ebb1f26409d5da67afd479" [[package]] name = "unicode-xid" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" +checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" [[package]] -name = "unicode-xid" -version = "0.2.0" +name = "unsigned-varint" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c" +checksum = "a7f0023a96687fe169081e8adce3f65e3874426b7886e9234d490af2dc077959" [[package]] name = "unsigned-varint" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c689459fbaeb50e56c6749275f084decfd02194ac5852e6617d95d0d3cf02eaf" +checksum = "3b7ffb36714206d2f5f05d61a2bc350415c642f2c54433f0ebf829afbe41d570" dependencies = [ "bytes 0.5.4", + "futures 0.3.4", "futures_codec", ] @@ -8637,6 +8704,12 @@ dependencies = [ "percent-encoding 2.1.0", ] +[[package]] +name = "uuid" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fde2f6a4bea1d6e007c4ad38c6839fa71cbb63b6dbf5b595aa38dc9b1093c11" + [[package]] name = "vcpkg" version = "0.2.8" @@ -8758,8 +8831,8 @@ dependencies = [ "bumpalo", "lazy_static", "log 0.4.8", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", "wasm-bindgen-shared", ] @@ -8782,7 +8855,7 @@ version = "0.2.58" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "574094772ce6921576fb6f2e3f7497b8a76273b6db092be18fc48a082de09dc3" dependencies = [ - "quote 1.0.2", + "quote", "wasm-bindgen-macro-support", ] @@ -8792,8 +8865,8 @@ version = "0.2.58" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e85031354f25eaebe78bb7db1c3d86140312a911a106b2e29f9cc440ce3e7668" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", "wasm-bindgen-backend", "wasm-bindgen-shared", @@ -8814,8 +8887,8 @@ dependencies = [ "anyhow", "heck", "log 0.4.8", - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", "wasm-bindgen-backend", "weedle", @@ -8874,21 +8947,14 @@ dependencies = [ [[package]] name = "wasmparser" -version = "0.48.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "073da89bf1c84db000dd68ce660c1b4a08e3a2d28fd1e3394ab9e7abdde4a0f8" - -[[package]] -name = "wasmparser" -version = "0.51.1" +version = "0.51.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e41b27a1677fe28c115de49efca55dabb14f7fece2c32947ffb9b1064fe5bd4" +checksum = "aeb1956b19469d1c5e63e459d29e7b5aa0f558d9f16fcef09736f8a265e6c10a" [[package]] name = "wasmtime" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5614d964c3e7d07a13b59aca66103c52656bd80430f0d86dc7eeb3af4f03d4a2" +version = "0.12.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "anyhow", "backtrace", @@ -8898,9 +8964,10 @@ dependencies = [ "region", "rustc-demangle", "target-lexicon", - "wasmparser 0.51.1", + "wasmparser", "wasmtime-environ", "wasmtime-jit", + "wasmtime-profiling", "wasmtime-runtime", "wat", "winapi 0.3.8", @@ -8908,25 +8975,23 @@ dependencies = [ [[package]] name = "wasmtime-debug" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb5900275b4ef0b621ce725b9d5660b12825d7f7d79b392b97baf089ffab8c0" +version = "0.12.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "anyhow", "faerie", - "gimli 0.19.0", + "gimli", "more-asserts", "target-lexicon", "thiserror", - "wasmparser 0.51.1", + "wasmparser", "wasmtime-environ", ] [[package]] name = "wasmtime-environ" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f04661851e133fb11691c4a0f92a705766b4bbf7afc06811f949e295cc8414fc" +version = "0.12.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "anyhow", "base64 0.11.0", @@ -8946,16 +9011,15 @@ dependencies = [ "sha2", "thiserror", "toml", - "wasmparser 0.51.1", + "wasmparser", "winapi 0.3.8", "zstd", ] [[package]] name = "wasmtime-jit" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d451353764ce55c9bb6a8b260063cfc209b7adadd277a9a872ab4563a69e357c" +version = "0.12.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "anyhow", "cfg-if", @@ -8968,46 +9032,63 @@ dependencies = [ "region", "target-lexicon", "thiserror", - "wasmparser 0.51.1", + "wasmparser", "wasmtime-debug", "wasmtime-environ", + "wasmtime-profiling", "wasmtime-runtime", "winapi 0.3.8", ] +[[package]] +name = "wasmtime-profiling" +version = "0.12.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" +dependencies = [ + "gimli", + "goblin", + "lazy_static", + "libc", + "object", + "scroll", + "serde", + "target-lexicon", +] + [[package]] name = "wasmtime-runtime" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dbd4fc114b828cae3e405fed413df4b3814d87a92ea029640cec9ba41f0c162" +version = "0.12.0" +source = "git+https://github.com/paritytech/wasmtime?branch=a-thread-safe-api#851887d84d03543f931f6312448d0dd5d8a9324e" dependencies = [ "backtrace", "cc", "cfg-if", "indexmap", + "lazy_static", "libc", "memoffset", "more-asserts", "region", "thiserror", "wasmtime-environ", + "wasmtime-profiling", "winapi 0.3.8", ] [[package]] name = "wast" -version = "7.0.0" +version = "9.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"12a729d076deb29c8509fa71f2d427729f9394f9496844ed8fcab152f35d163d" +checksum = "ee7b16105405ca2aa2376ba522d8d4b1a11604941dd3bb7df9fd2ece60f8d16a" dependencies = [ "leb128", ] [[package]] name = "wat" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5795e34a4b39893653dec97e644fac85c31398e0ce1abecc48967aac83d9e8ce" +checksum = "56173f7f4fb59aebe35a7e71423845e1c6c7144bfb56362d497931b6b3bed0f6" dependencies = [ "wast", ] @@ -9103,16 +9184,6 @@ dependencies = [ "nom", ] -[[package]] -name = "which" -version = "2.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b57acb10231b9493c8472b20cb57317d0679a49e0bdbee44b3b803a6473af164" -dependencies = [ - "failure", - "libc", -] - [[package]] name = "which" version = "3.1.0" @@ -9193,17 +9264,6 @@ dependencies = [ "winapi-build", ] -[[package]] -name = "x25519-dalek" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ee1585dc1484373cbc1cee7aafda26634665cf449436fd6e24bfd1fad230538" -dependencies = [ - "clear_on_drop", - "curve25519-dalek 1.2.3", - "rand_core 0.3.1", -] - [[package]] name = "x25519-dalek" version = "0.6.0" @@ -9223,9 +9283,9 @@ checksum = "d089681aa106a86fade1b0128fb5daf07d5867a509ab036d99988dec80429a57" [[package]] name = "yamux" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d73295bc9d9acf89dd9336b3b5f5b57731ee72b587857dd4312721a0196b48e5" +checksum = "f03098897b734bd943ab23f6aa9f98aafd72a88516deedd66f9d564c57bf2f19" dependencies = [ "bytes 0.5.4", "futures 0.3.4", @@ -9257,8 +9317,8 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de251eec69fc7c1bc3923403d18ececb929380e016afe103da75f396704f8ca2" dependencies = [ - "proc-macro2 1.0.8", - "quote 1.0.2", + "proc-macro2", + "quote", "syn", "synstructure", ] diff --git a/Cargo.toml b/Cargo.toml index 2dc0c8926cb48eec0e74b32ef13fdfd431f066dc..0459bc8ebbd6abd658db6295fefc4c699291207e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -53,9 +53,8 @@ members = [ "client/telemetry", "client/transaction-pool", "client/transaction-pool/graph", + "utils/prometheus", "utils/wasm-builder-runner", - "utils/grafana-data-source", - "utils/grafana-data-source/test", "frame/assets", "frame/aura", "frame/authority-discovery", @@ -63,6 +62,7 @@ members = [ "frame/babe", "frame/balances", "frame/benchmarking", + "frame/benchmark", "frame/collective", "frame/contracts", "frame/contracts/rpc", @@ -72,6 +72,7 @@ members = [ "frame/elections", "frame/evm", "frame/example", + "frame/example-offchain-worker", "frame/executive", "frame/finality-tracker", "frame/generic-asset", diff --git a/bin/node-template/node/Cargo.toml b/bin/node-template/node/Cargo.toml index 9ad4a0e8a55ad5ad82a07252f1d62fc12e8ce101..1e8c3fad2e3c0e2f1a8cef0f2d7fff57f2b45ff3 100644 --- a/bin/node-template/node/Cargo.toml +++ b/bin/node-template/node/Cargo.toml @@ -1,10 +1,12 @@ [package] name = "node-template" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Anonymous"] edition = "2018" license = "Unlicense" build = "build.rs" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [[bin]] name = "node-template" @@ -14,25 +16,26 @@ futures = "0.3.1" log = "0.4.8" structopt = "0.3.8" -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-executor = { version = "0.8", 
path = "../../../client/executor" } -sc-service = { version = "0.8", path = "../../../client/service" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-network = { version = "0.8", path = "../../../client/network" } -sc-consensus-aura = { version = "0.8", path = "../../../client/consensus/aura" } -sp-consensus-aura = { version = "0.8", path = "../../../primitives/consensus/aura" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sc-client = { version = "0.8", path = "../../../client/" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sc-basic-authorship = { path = "../../../client/basic-authorship" } +sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor" } +sc-service = { version = "0.8.0-alpha.2", path = "../../../client/service" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } +sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } +sc-network = { version = "0.8.0-alpha.2", path = "../../../client/network" } +sc-consensus-aura = { version = "0.8.0-alpha.2", path = "../../../client/consensus/aura" } +sp-consensus-aura = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/aura" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +grandpa = { version = "0.8.0-alpha.2", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +grandpa-primitives = { version = "2.0.0-alpha.2", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sc-basic-authorship = { path = "../../../client/basic-authorship" , version = "0.8.0-alpha.2"} -node-template-runtime = { version = "2.0.0", path = "../runtime" } +node-template-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } [build-dependencies] vergen = "3.0.4" -build-script-utils = { version = "2.0.0", package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } +build-script-utils = { version = "2.0.0-alpha.2", package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index 9bdfea3b7820d5ed80e5912d65de7e91a3b14f9a..64b84005072fda218c5b866e8ec8a1898e27df18 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -1,7 +1,7 @@ use sp_core::{Pair, Public, sr25519}; use node_template_runtime::{ AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, - IndicesConfig, SudoConfig, 
SystemConfig, WASM_BINARY, Signature }; use sp_consensus_aura::sr25519::{AuthorityId as AuraId}; use grandpa_primitives::{AuthorityId as GrandpaId}; @@ -127,21 +127,18 @@ fn testnet_genesis(initial_authorities: Vec<(AuraId, GrandpaId)>, code: WASM_BINARY.to_vec(), changes_trie_config: Default::default(), }), - indices: Some(IndicesConfig { - indices: vec![], - }), balances: Some(BalancesConfig { balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), }), - sudo: Some(SudoConfig { - key: root_key, - }), aura: Some(AuraConfig { authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), }), grandpa: Some(GrandpaConfig { authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), }), + sudo: Some(SudoConfig { + key: root_key, + }), } } diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index e7e386703deee0a7894dfc5bf931c6c41fa89438..0f4c301dbff5b7defe0565bf71e85b21a6418867 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -15,32 +15,35 @@ // along with Substrate. If not, see <http://www.gnu.org/licenses/>. use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use sc_cli::{VersionInfo, error}; +use sc_cli::VersionInfo; use crate::service; use crate::chain_spec; use crate::cli::Cli; /// Parse and run command line arguments -pub fn run(version: VersionInfo) -> error::Result<()> { +pub fn run(version: VersionInfo) -> sc_cli::Result<()> { let opt = sc_cli::from_args::<Cli>(&version); - let config = sc_service::Configuration::new(&version); + let mut config = sc_service::Configuration::from_version(&version); match opt.subcommand { - Some(subcommand) => sc_cli::run_subcommand( - config, - subcommand, - chain_spec::load_spec, - |config: _| Ok(new_full_start!(config).0), - &version, - ), - None => sc_cli::run( - config, - opt.run, - service::new_light, - service::new_full, - chain_spec::load_spec, - &version, - ) + Some(subcommand) => { + subcommand.init(&version)?; + subcommand.update_config(&mut config, chain_spec::load_spec, &version)?; + subcommand.run( + config, + |config: _| Ok(new_full_start!(config).0), + ) + }, + None => { + opt.run.init(&version)?; + opt.run.update_config(&mut config, chain_spec::load_spec, &version)?; + opt.run.run( + config, + service::new_light, + service::new_full, + &version, + ) + }, } } diff --git a/bin/node-template/node/src/main.rs b/bin/node-template/node/src/main.rs index 9d0a57d77a851d551e9a8e087365a8d8cb470977..91b2c257e0cd733f7e87d00e02857101810a2fb0 100644 --- a/bin/node-template/node/src/main.rs +++ b/bin/node-template/node/src/main.rs @@ -7,10 +7,8 @@ mod service; mod cli; mod command; -pub use sc_cli::{VersionInfo, error}; - -fn main() -> Result<(), error::Error> { - let version = VersionInfo { +fn main() -> sc_cli::Result<()> { + let version = sc_cli::VersionInfo { name: "Substrate Node", commit: env!("VERGEN_SHA_SHORT"), version: env!("CARGO_PKG_VERSION"), diff --git a/bin/node-template/node/src/service.rs b/bin/node-template/node/src/service.rs index 5466f3e919c0baa4bed2b83622fdf35d5987e5b8..f289ff58549dbc354bfdb706f8edf1659f6b044a 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -3,14 +3,14 @@ use std::sync::Arc; use std::time::Duration; use sc_client::LongestChain; +use sc_client_api::ExecutorProvider; use node_template_runtime::{self, GenesisConfig, opaque::Block, RuntimeApi}; use sc_service::{error::{Error as ServiceError}, AbstractService,
Configuration, ServiceBuilder}; use sp_inherents::InherentDataProviders; -use sc_network::{construct_simple_protocol}; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; +use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider}; // Our native executor instance. native_executor_instance!( @@ -19,17 +19,13 @@ native_executor_instance!( node_template_runtime::native_version, ); -construct_simple_protocol! { - /// Demo protocol attachment for substrate. - pub struct NodeProtocol where Block = Block { } -} - /// Starts a `ServiceBuilder` for a full service. /// /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. macro_rules! new_full_start { ($config:expr) => {{ + use std::sync::Arc; let mut import_setup = None; let inherent_data_providers = sp_inherents::InherentDataProviders::new(); @@ -43,27 +39,24 @@ macro_rules! new_full_start { let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); Ok(sc_transaction_pool::BasicPool::new(config, std::sync::Arc::new(pool_api))) })? - .with_import_queue(|_config, client, mut select_chain, transaction_pool| { + .with_import_queue(|_config, client, mut select_chain, _transaction_pool| { let select_chain = select_chain.take() .ok_or_else(|| sc_service::Error::SelectChainRequired)?; let (grandpa_block_import, grandpa_link) = - grandpa::block_import::<_, _, _, node_template_runtime::RuntimeApi, _>( - client.clone(), &*client, select_chain - )?; + grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( grandpa_block_import.clone(), client.clone(), ); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, _>( - sc_consensus_aura::SlotDuration::get_or_compute(&*client)?, + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair>( + sc_consensus_aura::slot_duration(&*client)?, aura_block_import, Some(Box::new(grandpa_block_import.clone())), None, client, inherent_data_providers.clone(), - Some(transaction_pool), )?; import_setup = Some((grandpa_block_import, grandpa_link)); @@ -95,17 +88,19 @@ pub fn new_full(config: Configuration) import_setup.take() .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - let service = builder.with_network_protocol(|_| Ok(NodeProtocol::new()))? - .with_finality_proof_provider(|client, backend| - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) - )? + let service = builder + .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc<dyn StorageAndProofProvider<_, _>>; + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) + })?
.build()?; if participates_in_consensus { - let proposer = sc_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; + let proposer = sc_basic_authorship::ProposerFactory::new( + service.client(), + service.transaction_pool() + ); let client = service.client(); let select_chain = service.select_chain() @@ -115,7 +110,7 @@ pub fn new_full(config: Configuration) sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _>( - sc_consensus_aura::SlotDuration::get_or_compute(&*client)?, + sc_consensus_aura::slot_duration(&*client)?, client, select_chain, block_import, @@ -145,44 +140,41 @@ pub fn new_full(config: Configuration) gossip_duration: Duration::from_millis(333), justification_period: 512, name: Some(name), - observer_enabled: true, + observer_enabled: false, keystore, is_authority, }; - match (is_authority, disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task("grandpa-observer", grandpa::run_grandpa_observer( - grandpa_config, - grandpa_link, - service.network(), - service.on_exit(), - )?); - }, - (true, false) => { - // start the full GRANDPA voter - let voter_config = grandpa::GrandpaParams { - config: grandpa_config, - link: grandpa_link, - network: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(service.telemetry_on_connect_stream()), - voting_rule: grandpa::VotingRulesBuilder::default().build(), - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - service.spawn_essential_task("grandpa", grandpa::run_grandpa_voter(voter_config)?); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &inherent_data_providers, - service.network(), - )?; - }, + let enable_grandpa = !disable_grandpa; + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. + let grandpa_config = grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), + voting_rule: grandpa::VotingRulesBuilder::default().build(), + prometheus_registry: service.prometheus_registry() + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + service.spawn_essential_task( + "grandpa-voter", + grandpa::run_grandpa_voter(grandpa_config)? 
+ ); + } else { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; } Ok(service) @@ -212,28 +204,31 @@ pub fn new_light(config: Configuration) let fetch_checker = fetcher .map(|fetcher| fetcher.checker().clone()) .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = grandpa::light_block_import::<_, _, _, RuntimeApi>( - client.clone(), backend, &*client.clone(), Arc::new(fetch_checker), + let grandpa_block_import = grandpa::light_block_import( + client.clone(), + backend, + &(client.clone() as Arc<_>), + Arc::new(fetch_checker), )?; let finality_proof_import = grandpa_block_import.clone(); let finality_proof_request_builder = finality_proof_import.create_finality_proof_request_builder(); - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair, ()>( - sc_consensus_aura::SlotDuration::get_or_compute(&*client)?, + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair>( + sc_consensus_aura::slot_duration(&*client)?, grandpa_block_import, None, Some(Box::new(finality_proof_import)), client, inherent_data_providers.clone(), - None, )?; Ok((import_queue, finality_proof_request_builder)) })? - .with_network_protocol(|_| Ok(NodeProtocol::new()))? - .with_finality_proof_provider(|client, backend| - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) - )? + .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc<dyn StorageAndProofProvider<_, _>>; + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) + })? .build() } diff --git a/bin/node-template/pallets/template/Cargo.toml b/bin/node-template/pallets/template/Cargo.toml index 8ea3f3adabc52c2c87802b2f407d188f14bd202c..b39fcc1dae4d5bdb3b71edfaac02c55783879e29 100644 --- a/bin/node-template/pallets/template/Cargo.toml +++ b/bin/node-template/pallets/template/Cargo.toml @@ -2,43 +2,47 @@ authors = ['Anonymous'] edition = '2018' name = 'pallet-template' -version = '2.0.0' +version = "2.0.0-alpha.3" +license = "Unlicense" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet template" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } safe-mix = { default-features = false, version = '1.0.0' } [dependencies.frame-support] default-features = false -version = '2.0.0' +version = "2.0.0-alpha.2" path = "../../../../frame/support" [dependencies.system] default-features = false package = 'frame-system' -version = '2.0.0' +version = "2.0.0-alpha.2" path = "../../../../frame/system" - [dev-dependencies.sp-core] default-features = false -version = '2.0.0' +version = "2.0.0-alpha.2" path = "../../../../primitives/core" [dev-dependencies.sp-io] default-features = false -version = '2.0.0' +version = "2.0.0-alpha.2" path = "../../../../primitives/io" [dev-dependencies.sp-runtime] default-features = false -version = '2.0.0' +version = "2.0.0-alpha.2" path = "../../../../primitives/runtime" + [features] default = ['std'] std = [ - 'codec/std', - 'frame-support/std', - 'safe-mix/std', - 'system/std' + 'codec/std', + 'frame-support/std', + 'safe-mix/std', + 'system/std' ] diff --git a/bin/node-template/pallets/template/src/mock.rs
b/bin/node-template/pallets/template/src/mock.rs index 7a23610e4b0a396039e01a563d591004ed121c7b..2ea81ffb456261122b234c6ae3c36f52eaa9c430 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -41,7 +41,7 @@ impl system::Trait for Test { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl Trait for Test { type Event = (); diff --git a/bin/node-template/runtime/Cargo.toml b/bin/node-template/runtime/Cargo.toml index ddecb0e4cff4c11da2ddf6e7973c1c57e082d1e1..9268dd8c05035f4dd8022702e9792f421ab556c5 100644 --- a/bin/node-template/runtime/Cargo.toml +++ b/bin/node-template/runtime/Cargo.toml @@ -1,42 +1,43 @@ [package] name = "node-template-runtime" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Anonymous"] edition = "2018" license = "Unlicense" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } -aura = { version = "2.0.0", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } -balances = { version = "2.0.0", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } -frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } -grandpa = { version = "2.0.0", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } -indices = { version = "2.0.0", default-features = false, package = "pallet-indices", path = "../../../frame/indices" } -randomness-collective-flip = { version = "2.0.0", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } -sudo = { version = "2.0.0", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } -system = { version = "2.0.0", default-features = false, package = "frame-system", path = "../../../frame/system" } -timestamp = { version = "2.0.0", default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } -transaction-payment = { version = "2.0.0", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } -frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } +aura = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-aura", path = "../../../frame/aura" } +balances = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-balances", path = "../../../frame/balances" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/support" } +grandpa = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-grandpa", path = "../../../frame/grandpa" } +randomness-collective-flip = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-randomness-collective-flip", path = "../../../frame/randomness-collective-flip" } +sudo = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-sudo", path = "../../../frame/sudo" } +system = { version = "2.0.0-alpha.2", default-features = false, package = "frame-system", path = "../../../frame/system" } +timestamp = { version = "2.0.0-alpha.2", 
default-features = false, package = "pallet-timestamp", path = "../../../frame/timestamp" } +transaction-payment = { version = "2.0.0-alpha.2", default-features = false, package = "pallet-transaction-payment", path = "../../../frame/transaction-payment" } +frame-executive = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/executive" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} -sp-consensus-aura = { version = "0.8", default-features = false, path = "../../../primitives/consensus/aura" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-inherents = { path = "../../../primitives/inherents", default-features = false} -sp-io = { version = "2.0.0", default-features = false, path = "../../../primitives/io" } -sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/api" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-alpha.2"} +sp-consensus-aura = { version = "0.8.0-alpha.2", default-features = false, path = "../../../primitives/consensus/aura" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } +sp-inherents = { path = "../../../primitives/inherents", default-features = false, version = "2.0.0-alpha.2"} +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/io" } +sp-offchain = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/offchain" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } +sp-session = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/session" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/std" } +sp-transaction-pool = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/version" } -template = { version = "2.0.0", default-features = false, path = "../pallets/template", package = "pallet-template" } +template = { version = "2.0.0-alpha.2", default-features = false, path = "../pallets/template", package = "pallet-template" } [build-dependencies] -wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } [features] default = ["std"] @@ -47,7 +48,6 @@ std = [ "frame-executive/std", 
"frame-support/std", "grandpa/std", - "indices/std", "randomness-collective-flip/std", "serde", "sp-api/std", diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index a1bcd157ad63a7e677a92ea4f2cdcc4c444ce040..2bc4c2745007beb069900c48220efc8fb6f94569 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -15,7 +15,7 @@ use sp_runtime::{ impl_opaque_keys, MultiSignature, }; use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, StaticLookup, Verify, ConvertInto, IdentifyAccount + BlakeTwo256, Block as BlockT, IdentityLookup, Verify, ConvertInto, IdentifyAccount }; use sp_api::impl_runtime_apis; use sp_consensus_aura::sr25519::AuthorityId as AuraId; @@ -132,7 +132,7 @@ impl system::Trait for Runtime { /// The aggregated dispatch type that is available for extrinsics. type Call = Call; /// The lookup mechanism to get account ID from whatever is passed in dispatchers. - type Lookup = Indices; + type Lookup = IdentityLookup; /// The index type for storing how many extrinsics an account has signed. type Index = Index; /// The index type for blocks. @@ -164,7 +164,7 @@ impl system::Trait for Runtime { /// What to do if a new account is created. type OnNewAccount = (); /// What to do if an account is fully reaped from the system. - type OnReapAccount = Balances; + type OnKilledAccount = (); /// The data to be stored in an account. type AccountData = balances::AccountData; } @@ -177,23 +177,6 @@ impl grandpa::Trait for Runtime { type Event = Event; } -parameter_types! { - /// How much an index costs. - pub const IndexDeposit: u128 = 100; -} - -impl indices::Trait for Runtime { - /// The type for recording indexing into the account enumeration. If this ever overflows, there - /// will be problems! - type AccountIndex = AccountIndex; - /// The ubiquitous event type. - type Event = Event; - /// The currency type. - type Currency = Balances; - /// How much an index costs. - type Deposit = IndexDeposit; -} - parameter_types! { pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } @@ -250,21 +233,20 @@ construct_runtime!( UncheckedExtrinsic = UncheckedExtrinsic { System: system::{Module, Call, Config, Storage, Event}, + RandomnessCollectiveFlip: randomness_collective_flip::{Module, Call, Storage}, Timestamp: timestamp::{Module, Call, Storage, Inherent}, Aura: aura::{Module, Config, Inherent(Timestamp)}, Grandpa: grandpa::{Module, Call, Storage, Config, Event}, - Indices: indices::{Module, Call, Storage, Event, Config}, Balances: balances::{Module, Call, Storage, Config, Event}, TransactionPayment: transaction_payment::{Module, Storage}, Sudo: sudo::{Module, Call, Config, Storage, Event}, // Used for the module template in `./template.rs` TemplateModule: template::{Module, Call, Storage, Event}, - RandomnessCollectiveFlip: randomness_collective_flip::{Module, Call, Storage}, } ); /// The address format for describing accounts. -pub type Address = ::Source; +pub type Address = AccountId; /// Block header type as expected by this runtime. pub type Header = generic::Header; /// Block type as expected by this runtime. diff --git a/bin/node/cli/Cargo.toml b/bin/node/cli/Cargo.toml index d383e2c05a944bab30885e60c452211b9270cf67..e18b6b228e65553543891d6e5cbcd6fb6094c752 100644 --- a/bin/node/cli/Cargo.toml +++ b/bin/node/cli/Cargo.toml @@ -1,12 +1,14 @@ [package] name = "node-cli" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] description = "Substrate node implementation in Rust." 
build = "build.rs" edition = "2018" license = "GPL-3.0" default-run = "substrate" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [package.metadata.wasm-pack.profile.release] # `wasm-opt` has some problems on linux, see @@ -29,7 +31,7 @@ crate-type = ["cdylib", "rlib"] [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.0.6" } +codec = { package = "parity-scale-codec", version = "1.2.0" } serde = { version = "1.0.102", features = ["derive"] } futures = { version = "0.3.1", features = ["compat"] } hex-literal = "0.2.1" @@ -40,83 +42,84 @@ structopt = { version = "0.3.8", optional = true } tracing = "0.1.10" # primitives -sp-authority-discovery = { version = "2.0.0", path = "../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8", path = "../../../primitives/consensus/babe" } -grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } -sp-finality-tracker = { version = "2.0.0", default-features = false, path = "../../../primitives/finality-tracker" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } +sp-authority-discovery = { version = "2.0.0-alpha.2", path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/babe" } +grandpa-primitives = { version = "2.0.0-alpha.2", package = "sp-finality-grandpa", path = "../../../primitives/finality-grandpa" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/timestamp" } +sp-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/finality-tracker" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } +sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } # client dependencies -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-client = { version = "0.8", path = "../../../client/" } -sc-chain-spec = { version = "2.0.0", path = "../../../client/chain-spec" } -sc-transaction-pool = { version = "2.0.0", path = "../../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-network = { version = "0.8", path = "../../../client/network" } -sc-consensus-babe = { version = "0.8", path = "../../../client/consensus/babe" } -grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } -sc-client-db = { version = "0.8", default-features = false, path = "../../../client/db" } -sc-offchain = { version = "2.0.0", path = 
"../../../client/offchain" } -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } -sc-basic-authorship = { version = "0.8", path = "../../../client/basic-authorship" } -sc-service = { version = "0.8", default-features = false, path = "../../../client/service" } -sc-tracing = { version = "2.0.0", path = "../../../client/tracing" } -sc-telemetry = { version = "2.0.0", path = "../../../client/telemetry" } -sc-authority-discovery = { version = "0.8", path = "../../../client/authority-discovery" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } +sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } +sc-chain-spec = { version = "2.0.0-alpha.2", path = "../../../client/chain-spec" } +sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } +sc-network = { version = "0.8.0-alpha.2", path = "../../../client/network" } +sc-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../client/consensus/babe" } +grandpa = { version = "0.8.0-alpha.2", package = "sc-finality-grandpa", path = "../../../client/finality-grandpa" } +sc-client-db = { version = "0.8.0-alpha.2", default-features = false, path = "../../../client/db" } +sc-offchain = { version = "2.0.0-alpha.2", path = "../../../client/offchain" } +sc-rpc = { version = "2.0.0-alpha.2", path = "../../../client/rpc" } +sc-basic-authorship = { version = "0.8.0-alpha.2", path = "../../../client/basic-authorship" } +sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../../../client/service" } +sc-tracing = { version = "2.0.0-alpha.2", path = "../../../client/tracing" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../../../client/telemetry" } +sc-authority-discovery = { version = "0.8.0-alpha.2", path = "../../../client/authority-discovery" } # frame dependencies -pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } -pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } -frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } -pallet-im-online = { version = "2.0.0", default-features = false, path = "../../../frame/im-online" } -pallet-authority-discovery = { version = "2.0.0", path = "../../../frame/authority-discovery" } +pallet-indices = { version = "2.0.0-alpha.2", path = "../../../frame/indices" } +pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/timestamp" } +pallet-contracts = { version = "2.0.0-alpha.2", path = "../../../frame/contracts" } +frame-system = { version = "2.0.0-alpha.2", path = "../../../frame/system" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../../../frame/balances" } +pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/support" } +pallet-im-online = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/im-online" } +pallet-authority-discovery = { version = 
"2.0.0-alpha.2", path = "../../../frame/authority-discovery" } # node-specific dependencies -node-runtime = { version = "2.0.0", path = "../runtime" } -node-rpc = { version = "2.0.0", path = "../rpc" } -node-primitives = { version = "2.0.0", path = "../primitives" } -node-executor = { version = "2.0.0", path = "../executor" } +node-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } +node-rpc = { version = "2.0.0-alpha.2", path = "../rpc" } +node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } +node-executor = { version = "2.0.0-alpha.2", path = "../executor" } # CLI-specific dependencies -sc-cli = { version = "0.8.0", optional = true, path = "../../../client/cli" } -frame-benchmarking-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } -node-transaction-factory = { version = "0.8.0", optional = true, path = "../transaction-factory" } -node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } +sc-cli = { version = "0.8.0-alpha.2", optional = true, path = "../../../client/cli" } +frame-benchmarking-cli = { version = "2.0.0-alpha.2", optional = true, path = "../../../utils/frame/benchmarking-cli" } +node-transaction-factory = { version = "0.8.0-alpha.2", optional = true, path = "../transaction-factory" } +node-inspect = { version = "0.8.0-alpha.2", optional = true, path = "../inspect" } # WASM-specific dependencies wasm-bindgen = { version = "0.2.57", optional = true } wasm-bindgen-futures = { version = "0.4.7", optional = true } -browser-utils = { path = "../../../utils/browser", optional = true } +browser-utils = { package = "substrate-browser-utils", path = "../../../utils/browser", optional = true, version = "0.8.0-alpha.2" } [dev-dependencies] -sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } -sc-consensus-babe = { version = "0.8", features = ["test-helpers"], path = "../../../client/consensus/babe" } -sc-consensus-epochs = { version = "0.8", path = "../../../client/consensus/epochs" } -sc-service-test = { version = "2.0.0", path = "../../../client/service/test" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../../../client/keystore" } +sc-consensus-babe = { version = "0.8.0-alpha.2", features = ["test-helpers"], path = "../../../client/consensus/babe" } +sc-consensus-epochs = { version = "0.8.0-alpha.2", path = "../../../client/consensus/epochs" } +sc-service-test = { version = "2.0.0-dev", path = "../../../client/service/test" } futures = "0.3.1" tempfile = "3.1.0" assert_cmd = "0.12" nix = "0.17" +serde_json = "1.0" [build-dependencies] -build-script-utils = { version = "2.0.0", package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } +build-script-utils = { version = "2.0.0-alpha.2", package = "substrate-build-script-utils", path = "../../../utils/build-script-utils" } structopt = { version = "0.3.8", optional = true } -node-transaction-factory = { version = "0.8.0", optional = true, path = "../transaction-factory" } -node-inspect = { version = "0.8.0", optional = true, path = "../inspect" } -frame-benchmarking-cli = { version = "2.0.0", optional = true, path = "../../../utils/frame/benchmarking-cli" } +node-transaction-factory = { version = "0.8.0-alpha.2", optional = true, path = "../transaction-factory" } +node-inspect = { version = "0.8.0-alpha.2", optional = true, path = "../inspect" } +frame-benchmarking-cli = { version = "2.0.0-alpha.2", optional = true, path = "../../../utils/frame/benchmarking-cli" } [build-dependencies.sc-cli] 
-version = "0.8.0" +version = "0.8.0-alpha.2" package = "sc-cli" path = "../../../client/cli" optional = true @@ -148,3 +151,4 @@ wasmtime = [ "sc-cli/wasmtime", "sc-service/wasmtime", ] +runtime-benchmarks = [ "node-runtime/runtime-benchmarks" ] diff --git a/bin/node/cli/bin/main.rs b/bin/node/cli/bin/main.rs index e951c04710b92ff387b2f5fd5ba3c144b68af05c..8c4412667baceec56904864413178cbc88001497 100644 --- a/bin/node/cli/bin/main.rs +++ b/bin/node/cli/bin/main.rs @@ -18,10 +18,8 @@ #![warn(missing_docs)] -use sc_cli::VersionInfo; - -fn main() -> Result<(), sc_cli::error::Error> { - let version = VersionInfo { +fn main() -> sc_cli::Result<()> { + let version = sc_cli::VersionInfo { name: "Substrate Node", commit: env!("VERGEN_SHA_SHORT"), version: env!("CARGO_PKG_VERSION"), diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 5cdfb5cab86f591343a18596aa5ee49c9f3d3ee7..af24db704c379e4ca11faf0a359e03afb04167f8 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -243,11 +243,10 @@ pub fn testnet_genesis( }), pallet_session: Some(SessionConfig { keys: initial_authorities.iter().map(|x| { - (x.0.clone(), session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone())) + (x.0.clone(), x.0.clone(), session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone())) }).collect::>(), }), pallet_staking: Some(StakingConfig { - current_era: 0, validator_count: initial_authorities.len() as u32 * 2, minimum_validator_count: initial_authorities.len() as u32, stakers: initial_authorities.iter().map(|x| { diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 40f1dcf6f428d62df565074eb12d68803e624d39..b6db9c3deb7e31f8f8da62d30b6971bf572b30d1 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -19,11 +19,6 @@ use structopt::StructOpt; /// An overarching CLI command definition. #[derive(Clone, Debug, StructOpt)] -#[structopt(settings = &[ - structopt::clap::AppSettings::GlobalVersion, - structopt::clap::AppSettings::ArgsNegateSubcommands, - structopt::clap::AppSettings::SubcommandsNegateReqs, -])] pub struct Cli { /// Possible subcommand with parameters. #[structopt(subcommand)] diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index 5a942d964c9f962f5c6b4b9069ec25f05ac8dede..dfdf5533f2b343e5f3fbe372a4b3d058f9c97f07 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -14,13 +14,13 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sc_cli::{VersionInfo, error}; +use sc_cli::VersionInfo; use sc_service::{Roles as ServiceRoles}; use node_transaction_factory::RuntimeAdapter; use crate::{Cli, service, ChainSpec, load_spec, Subcommand, factory_impl::FactoryState}; /// Parse command line arguments into service configuration. 
-pub fn run(args: I, version: VersionInfo) -> error::Result<()> +pub fn run(args: I, version: VersionInfo) -> sc_cli::Result<()> where I: Iterator, T: Into + Clone, @@ -28,19 +28,22 @@ where let args: Vec<_> = args.collect(); let opt = sc_cli::from_iter::(args.clone(), &version); - let mut config = sc_service::Configuration::new(&version); + let mut config = sc_service::Configuration::from_version(&version); match opt.subcommand { - None => sc_cli::run( - config, - opt.run, - service::new_light, - service::new_full, - load_spec, - &version, - ), + None => { + opt.run.init(&version)?; + opt.run.update_config(&mut config, load_spec, &version)?; + opt.run.run( + config, + service::new_light, + service::new_full, + &version, + ) + }, Some(Subcommand::Inspect(cmd)) => { - cmd.init(&mut config, load_spec, &version)?; + cmd.init(&version)?; + cmd.update_config(&mut config, load_spec, &version)?; let client = sc_service::new_full_client::< node_runtime::Block, node_runtime::RuntimeApi, node_executor::Executor, _, _, @@ -50,25 +53,27 @@ where cmd.run(inspect) }, Some(Subcommand::Benchmark(cmd)) => { - cmd.init(&mut config, load_spec, &version)?; + cmd.init(&version)?; + cmd.update_config(&mut config, load_spec, &version)?; cmd.run::<_, _, node_runtime::Block, node_executor::Executor>(config) }, Some(Subcommand::Factory(cli_args)) => { - sc_cli::init(&cli_args.shared_params, &version)?; - sc_cli::init_config(&mut config, &cli_args.shared_params, &version, load_spec)?; - sc_cli::fill_import_params( + cli_args.shared_params.init(&version)?; + cli_args.shared_params.update_config(&mut config, load_spec, &version)?; + cli_args.import_params.update_config( &mut config, - &cli_args.import_params, ServiceRoles::FULL, cli_args.shared_params.dev, )?; - sc_cli::fill_config_keystore_in_memory(&mut config)?; + config.use_in_memory_keystore()?; match ChainSpec::from(config.expect_chain_spec().id()) { Some(ref c) if c == &ChainSpec::Development || c == &ChainSpec::LocalTestnet => {}, - _ => panic!("Factory is only supported for development and local testnet."), + _ => return Err( + "Factory is only supported for development and local testnet.".into() + ), } // Setup tracing. 
@@ -77,7 +82,9 @@ where cli_args.import_params.tracing_receiver.into(), tracing_targets ); if let Err(e) = tracing::subscriber::set_global_default(subscriber) { - panic!("Unable to set global default subscriber {}", e); + return Err( + format!("Unable to set global default subscriber {}", e).into() + ); } } @@ -96,12 +103,13 @@ where Ok(()) }, - Some(Subcommand::Base(subcommand)) => sc_cli::run_subcommand( - config, - subcommand, - load_spec, - |config: service::NodeConfiguration| Ok(new_full_start!(config).0), - &version, - ), + Some(Subcommand::Base(subcommand)) => { + subcommand.init(&version)?; + subcommand.update_config(&mut config, load_spec, &version)?; + subcommand.run( + config, + |config: service::NodeConfiguration| Ok(new_full_start!(config).0), + ) + }, } } diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index a882483a44a3dd8411eb6892a71e0cb50a506fbd..332c47ea132ea3dc176a07a20d34a7d2e62d0d74 100644 --- a/bin/node/cli/src/service.rs +++ b/bin/node/cli/src/service.rs @@ -22,7 +22,7 @@ use std::sync::Arc; use sc_consensus_babe; use sc_client::{self, LongestChain}; -use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider}; +use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider}; use node_executor; use node_primitives::Block; use node_runtime::{GenesisConfig, RuntimeApi}; @@ -30,7 +30,6 @@ use sc_service::{ AbstractService, ServiceBuilder, config::Configuration, error::{Error as ServiceError}, }; use sp_inherents::InherentDataProviders; -use sc_network::construct_simple_protocol; use sc_service::{Service, NetworkStatus}; use sc_client::{Client, LocalCallExecutor}; @@ -40,17 +39,13 @@ use node_executor::NativeExecutor; use sc_network::NetworkService; use sc_offchain::OffchainWorkers; -construct_simple_protocol! { - /// Demo protocol attachment for substrate. - pub struct NodeProtocol where Block = Block { } -} - /// Starts a `ServiceBuilder` for a full service. /// /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. macro_rules! new_full_start { ($config:expr) => {{ + use std::sync::Arc; type RpcExtension = jsonrpc_core::IoHandler; let mut import_setup = None; let inherent_data_providers = sp_inherents::InherentDataProviders::new(); @@ -70,7 +65,7 @@ macro_rules! new_full_start { .ok_or_else(|| sc_service::Error::SelectChainRequired)?; let (grandpa_block_import, grandpa_link) = grandpa::block_import( client.clone(), - &*client, + &(client.clone() as Arc<_>), select_chain, )?; let justification_import = grandpa_block_import.clone(); @@ -79,7 +74,6 @@ macro_rules! new_full_start { sc_consensus_babe::Config::get_or_compute(&*client)?, grandpa_block_import, client.clone(), - client.clone(), )?; let import_queue = sc_consensus_babe::import_queue( @@ -87,7 +81,6 @@ macro_rules! new_full_start { block_import.clone(), Some(Box::new(justification_import)), None, - client.clone(), client, inherent_data_providers.clone(), )?; @@ -124,6 +117,7 @@ macro_rules! new_full { ($config:expr, $with_startup_data: expr) => {{ use futures::prelude::*; use sc_network::Event; + use sc_client_api::ExecutorProvider; let ( is_authority, @@ -146,10 +140,12 @@ macro_rules! new_full { let (builder, mut import_setup, inherent_data_providers) = new_full_start!($config); - let service = builder.with_network_protocol(|_| Ok(crate::service::NodeProtocol::new()))? 
- .with_finality_proof_provider(|client, backend| - Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, client)) as _) - )? + let service = builder + .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc>; + Ok(Arc::new(grandpa::FinalityProofProvider::new(backend, provider)) as _) + })? .build()?; let (block_import, grandpa_link, babe_link) = import_setup.take() @@ -158,10 +154,10 @@ macro_rules! new_full { ($with_startup_data)(&block_import, &babe_link); if participates_in_consensus { - let proposer = sc_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; + let proposer = sc_basic_authorship::ProposerFactory::new( + service.client(), + service.transaction_pool() + ); let client = service.client(); let select_chain = service.select_chain() @@ -215,46 +211,41 @@ macro_rules! new_full { gossip_duration: std::time::Duration::from_millis(333), justification_period: 512, name: Some(name), - observer_enabled: true, + observer_enabled: false, keystore, is_authority, }; - match (is_authority, disable_grandpa) { - (false, false) => { - // start the lightweight GRANDPA observer - service.spawn_task("grandpa-observer", grandpa::run_grandpa_observer( - config, - grandpa_link, - service.network(), - service.on_exit(), - )?); - }, - (true, false) => { - // start the full GRANDPA voter - let grandpa_config = grandpa::GrandpaParams { - config: config, - link: grandpa_link, - network: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - on_exit: service.on_exit(), - telemetry_on_connect: Some(service.telemetry_on_connect_stream()), - voting_rule: grandpa::VotingRulesBuilder::default().build(), - }; - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - service.spawn_essential_task( - "grandpa-voter", - grandpa::run_grandpa_voter(grandpa_config)? - ); - }, - (_, true) => { - grandpa::setup_disabled_grandpa( - service.client(), - &inherent_data_providers, - service.network(), - )?; - }, + let enable_grandpa = !disable_grandpa; + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. + let grandpa_config = grandpa::GrandpaParams { + config, + link: grandpa_link, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), + voting_rule: grandpa::VotingRulesBuilder::default().build(), + prometheus_registry: service.prometheus_registry(), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + service.spawn_essential_task( + "grandpa-voter", + grandpa::run_grandpa_voter(grandpa_config)? + ); + } else { + grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; } Ok((service, inherent_data_providers)) @@ -264,20 +255,15 @@ macro_rules! 
new_full { }} } -#[allow(dead_code)] type ConcreteBlock = node_primitives::Block; -#[allow(dead_code)] type ConcreteClient = Client< Backend, - LocalCallExecutor, - NativeExecutor>, + LocalCallExecutor, NativeExecutor>, ConcreteBlock, node_runtime::RuntimeApi >; -#[allow(dead_code)] type ConcreteBackend = Backend; -#[allow(dead_code)] type ConcreteTransactionPool = sc_transaction_pool::BasicPool< sc_transaction_pool::FullChainApi, ConcreteBlock @@ -294,7 +280,7 @@ pub fn new_full(config: NodeConfiguration) ConcreteClient, LongestChain, NetworkStatus, - NetworkService::Hash>, + NetworkService::Hash>, ConcreteTransactionPool, OffchainWorkers< ConcreteClient, @@ -331,10 +317,10 @@ pub fn new_light(config: NodeConfiguration) let fetch_checker = fetcher .map(|fetcher| fetcher.checker().clone()) .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = grandpa::light_block_import::<_, _, _, RuntimeApi>( + let grandpa_block_import = grandpa::light_block_import( client.clone(), backend, - &*client, + &(client.clone() as Arc<_>), Arc::new(fetch_checker), )?; @@ -346,7 +332,6 @@ pub fn new_light(config: NodeConfiguration) sc_consensus_babe::Config::get_or_compute(&*client)?, grandpa_block_import, client.clone(), - client.clone(), )?; let import_queue = sc_consensus_babe::import_queue( @@ -355,16 +340,16 @@ pub fn new_light(config: NodeConfiguration) None, Some(Box::new(finality_proof_import)), client.clone(), - client, inherent_data_providers.clone(), )?; Ok((import_queue, finality_proof_request_builder)) })? - .with_network_protocol(|_| Ok(NodeProtocol::new()))? - .with_finality_proof_provider(|client, backend| - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, client)) as _) - )? + .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc>; + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) + })? .with_rpc_extensions(|builder,| -> Result { @@ -504,7 +489,7 @@ mod tests { |config| { let mut setup_handles = None; new_full!(config, | - block_import: &sc_consensus_babe::BabeBlockImport<_, _, Block, _, _, _>, + block_import: &sc_consensus_babe::BabeBlockImport, babe_link: &sc_consensus_babe::BabeLink, | { setup_handles = Some((block_import.clone(), babe_link.clone())); @@ -521,10 +506,10 @@ mod tests { let parent_header = service.client().header(&parent_id).unwrap().unwrap(); let parent_hash = parent_header.hash(); let parent_number = *parent_header.number(); - let mut proposer_factory = sc_basic_authorship::ProposerFactory { - client: service.client(), - transaction_pool: service.transaction_pool(), - }; + let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( + service.client(), + service.transaction_pool() + ); let epoch = babe_link.epoch_changes().lock().epoch_for_child_of( descendent_query(&*service.client()), diff --git a/bin/node/cli/tests/build_spec_works.rs b/bin/node/cli/tests/build_spec_works.rs new file mode 100644 index 0000000000000000000000000000000000000000..2eca71a5b5978de1e1de20873b6584a7d5c703b0 --- /dev/null +++ b/bin/node/cli/tests/build_spec_works.rs @@ -0,0 +1,37 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use assert_cmd::cargo::cargo_bin; +use std::process::Command; +use tempfile::tempdir; + +#[test] +fn build_spec_works() { + let base_path = tempdir().expect("could not create a temp dir"); + + let output = Command::new(cargo_bin("substrate")) + .args(&["build-spec", "--dev", "-d"]) + .arg(base_path.path()) + .output() + .unwrap(); + assert!(output.status.success()); + + // Make sure that the `dev` chain folder exists, but the `db` doesn't + assert!(base_path.path().join("chains/dev/").exists()); + assert!(!base_path.path().join("chains/dev/db").exists()); + + let _value: serde_json::Value = serde_json::from_slice(output.stdout.as_slice()).unwrap(); +} diff --git a/utils/grafana-data-source/test/src/main.rs b/bin/node/cli/tests/check_block_works.rs similarity index 52% rename from utils/grafana-data-source/test/src/main.rs rename to bin/node/cli/tests/check_block_works.rs index 53deaffc3beb6088acc71bfee202bfbae41f0ef4..6bfb82a8bfafb84c1b041b87dc093e2481193ec5 100644 --- a/utils/grafana-data-source/test/src/main.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -1,4 +1,4 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify @@ -14,31 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use grafana_data_source::{run_server, record_metrics}; -use std::time::Duration; -use rand::Rng; -use futures::{future::join, executor}; +#![cfg(unix)] -async fn randomness() { - loop { - futures_timer::Delay::new(Duration::from_secs(1)).await; +use assert_cmd::cargo::cargo_bin; +use std::process::Command; +use tempfile::tempdir; - let random = rand::thread_rng().gen_range(0.0, 1000.0); +mod common; - let result = record_metrics!( - "random data" => random, - "random^2" => random * random, - ); +#[test] +fn check_block_works() { + let base_path = tempdir().expect("could not create a temp dir"); - if let Err(error) = result { - eprintln!("{}", error); - } - } -} + common::run_dev_node_for_a_while(base_path.path()); -fn main() { - executor::block_on(join( - run_server("127.0.0.1:9955".parse().unwrap()), - randomness() - )).0.unwrap(); + let status = Command::new(cargo_bin("substrate")) + .args(&["check-block", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .arg("1") + .status() + .unwrap(); + assert!(status.success()); } diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 4044f69d08168e4dda1d526ff9b1c4e5efbd32b8..34e371195c16b20e216a9426f238ce6b78bcd257 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -14,21 +14,53 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use std::{process::{Child, ExitStatus}, thread, time::Duration}; +#![cfg(unix)] +#![allow(dead_code)] + +use std::{process::{Child, ExitStatus}, thread, time::Duration, path::Path}; +use assert_cmd::cargo::cargo_bin; +use std::{convert::TryInto, process::Command}; +use nix::sys::signal::{kill, Signal::SIGINT}; +use nix::unistd::Pid; /// Wait for the given `child` the given number of `secs`. /// /// Returns the `Some(exit status)` or `None` if the process did not finish in the given time. pub fn wait_for(child: &mut Child, secs: usize) -> Option { - for _ in 0..secs { + for i in 0..secs { match child.try_wait().unwrap() { - Some(status) => return Some(status), + Some(status) => { + if i > 5 { + eprintln!("Child process took {} seconds to exit gracefully", i); + } + return Some(status) + }, None => thread::sleep(Duration::from_secs(1)), } } - eprintln!("Took to long to exit. Killing..."); + eprintln!("Took too long to exit (> {} seconds). Killing...", secs); let _ = child.kill(); child.wait().unwrap(); None } + +/// Run the node for a while (30 seconds) +pub fn run_dev_node_for_a_while(base_path: &Path) { + let mut cmd = Command::new(cargo_bin("substrate")); + + let mut cmd = cmd + .args(&["--dev"]) + .arg("-d") + .arg(base_path) + .spawn() + .unwrap(); + + // Let it produce some blocks. + thread::sleep(Duration::from_secs(30)); + assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); + + // Stop the process + kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); + assert!(wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default()); +} diff --git a/bin/node/cli/tests/factory.rs b/bin/node/cli/tests/factory.rs new file mode 100644 index 0000000000000000000000000000000000000000..2930cd52e2e16e497d197849a9833d492c3868d7 --- /dev/null +++ b/bin/node/cli/tests/factory.rs @@ -0,0 +1,40 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +#![cfg(unix)] + +use assert_cmd::cargo::cargo_bin; +use std::process::{Command, Stdio}; +use tempfile::tempdir; + +mod common; + +#[test] +fn factory_works() { + let base_path = tempdir().expect("could not create a temp dir"); + + let status = Command::new(cargo_bin("substrate")) + .stdout(Stdio::null()) + .args(&["factory", "--dev", "-d"]) + .arg(base_path.path()) + .status() + .unwrap(); + assert!(status.success()); + + // Make sure that the `dev` chain folder exists & `db` + assert!(base_path.path().join("chains/dev/").exists()); + assert!(base_path.path().join("chains/dev/db").exists()); +} diff --git a/bin/node/cli/tests/import_export_and_revert_work.rs b/bin/node/cli/tests/import_export_and_revert_work.rs new file mode 100644 index 0000000000000000000000000000000000000000..131265e3b4ab9bd868b9728bca6cda558d2982f2 --- /dev/null +++ b/bin/node/cli/tests/import_export_and_revert_work.rs @@ -0,0 +1,59 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. 
+// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +#![cfg(unix)] + +use assert_cmd::cargo::cargo_bin; +use std::{process::Command, fs}; +use tempfile::tempdir; + +mod common; + +#[test] +fn import_export_and_revert_work() { + let base_path = tempdir().expect("could not create a temp dir"); + let exported_blocks = base_path.path().join("exported_blocks"); + + common::run_dev_node_for_a_while(base_path.path()); + + let status = Command::new(cargo_bin("substrate")) + .args(&["export-blocks", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .arg(&exported_blocks) + .status() + .unwrap(); + assert!(status.success()); + + let metadata = fs::metadata(&exported_blocks).unwrap(); + assert!(metadata.len() > 0, "file exported_blocks should not be empty"); + + let _ = fs::remove_dir_all(base_path.path().join("db")); + + let status = Command::new(cargo_bin("substrate")) + .args(&["import-blocks", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .arg(&exported_blocks) + .status() + .unwrap(); + assert!(status.success()); + + let status = Command::new(cargo_bin("substrate")) + .args(&["revert", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .status() + .unwrap(); + assert!(status.success()); +} diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs new file mode 100644 index 0000000000000000000000000000000000000000..441b08ccf46da1b1ef70ec49a7a46015d94f3c4c --- /dev/null +++ b/bin/node/cli/tests/inspect_works.rs @@ -0,0 +1,38 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +#![cfg(unix)] + +use assert_cmd::cargo::cargo_bin; +use std::process::Command; +use tempfile::tempdir; + +mod common; + +#[test] +fn inspect_works() { + let base_path = tempdir().expect("could not create a temp dir"); + + common::run_dev_node_for_a_while(base_path.path()); + + let status = Command::new(cargo_bin("substrate")) + .args(&["inspect", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .args(&["block", "1"]) + .status() + .unwrap(); + assert!(status.success()); +} diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs index e6b71db9910487a4e55f2fdf117e48c833b120bb..020259d0c595a57a01b8a9b113522d826e1de912 100644 --- a/bin/node/cli/tests/purge_chain_works.rs +++ b/bin/node/cli/tests/purge_chain_works.rs @@ -15,39 +15,27 @@ // along with Substrate. If not, see . use assert_cmd::cargo::cargo_bin; -use std::{convert::TryInto, process::Command, thread, time::Duration, fs, path::PathBuf}; +use std::process::Command; +use tempfile::tempdir; mod common; #[test] #[cfg(unix)] fn purge_chain_works() { - use nix::sys::signal::{kill, Signal::SIGINT}; - use nix::unistd::Pid; + let base_path = tempdir().expect("could not create a temp dir"); - let base_path = "purge_chain_test"; - - let _ = fs::remove_dir_all(base_path); - let mut cmd = Command::new(cargo_bin("substrate")) - .args(&["--dev", "-d", base_path]) - .spawn() - .unwrap(); - - // Let it produce some blocks. - thread::sleep(Duration::from_secs(30)); - assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); - - // Stop the process - kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(common::wait_for(&mut cmd, 30).map(|x| x.success()).unwrap_or_default()); + common::run_dev_node_for_a_while(base_path.path()); let status = Command::new(cargo_bin("substrate")) - .args(&["purge-chain", "--dev", "-d", base_path, "-y"]) + .args(&["purge-chain", "--dev", "-d"]) + .arg(base_path.path()) + .arg("-y") .status() .unwrap(); assert!(status.success()); // Make sure that the `dev` chain folder exists, but the `db` is deleted. - assert!(PathBuf::from(base_path).join("chains/dev/").exists()); - assert!(!PathBuf::from(base_path).join("chains/dev/db").exists()); + assert!(base_path.path().join("chains/dev/").exists()); + assert!(!base_path.path().join("chains/dev/db").exists()); } diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index dbb57bdd21ab8de8add8f4faf4057aea2157fdaf..67efedccbe771a65e892b5dbe94bac702d3c01ce 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -15,7 +15,8 @@ // along with Substrate. If not, see . 
use assert_cmd::cargo::cargo_bin; -use std::{convert::TryInto, process::Command, thread, time::Duration, fs}; +use std::{convert::TryInto, process::Command, thread, time::Duration}; +use tempfile::tempdir; mod common; @@ -26,13 +27,14 @@ fn running_the_node_works_and_can_be_interrupted() { use nix::unistd::Pid; fn run_command_and_kill(signal: Signal) { - let _ = fs::remove_dir_all("interrupt_test"); + let base_path = tempdir().expect("could not create a temp dir"); let mut cmd = Command::new(cargo_bin("substrate")) - .args(&["--dev", "-d", "interrupt_test"]) + .args(&["--dev", "-d"]) + .arg(base_path.path()) .spawn() .unwrap(); - thread::sleep(Duration::from_secs(30)); + thread::sleep(Duration::from_secs(20)); assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap(); assert_eq!( diff --git a/bin/node/executor/Cargo.toml b/bin/node/executor/Cargo.toml index f55e1dae58eb6b8ce31d00d9028b777f15a09114..c8ef75a5a2a9ae64c3949c0bd250262796ef0874 100644 --- a/bin/node/executor/Cargo.toml +++ b/bin/node/executor/Cargo.toml @@ -1,40 +1,42 @@ [package] name = "node-executor" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] description = "Substrate node implementation in Rust." edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0" } -node-primitives = { version = "2.0.0", path = "../primitives" } -node-runtime = { version = "2.0.0", path = "../runtime" } -sc-executor = { version = "0.8", path = "../../../client/executor" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-state-machine = { version = "0.8", path = "../../../primitives/state-machine" } -sp-trie = { version = "2.0.0", path = "../../../primitives/trie" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } +node-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } +sp-trie = { version = "2.0.0-alpha.2", path = "../../../primitives/trie" } trie-root = "0.16.0" -frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } +frame-benchmarking = { version = "2.0.0-alpha.2", path = "../../../frame/benchmarking" } [dev-dependencies] criterion = "0.3.0" -frame-support = { version = "2.0.0", path = "../../../frame/support" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } -node-testing = { version = "2.0.0", path = "../testing" } -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0", path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } -pallet-session = { version = "2.0.0", path = "../../../frame/session" } -pallet-timestamp = { version = "2.0.0", path = "../../../frame/timestamp" } 
-pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0", path = "../../../frame/treasury" } -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } +frame-support = { version = "2.0.0-alpha.2", path = "../../../frame/support" } +frame-system = { version = "2.0.0-alpha.2", path = "../../../frame/system" } +node-testing = { version = "2.0.0-alpha.2", path = "../testing" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../../../frame/balances" } +pallet-contracts = { version = "2.0.0-alpha.2", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0-alpha.2", path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0-alpha.2", path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0-alpha.2", path = "../../../frame/indices" } +pallet-session = { version = "2.0.0-alpha.2", path = "../../../frame/session" } +pallet-timestamp = { version = "2.0.0-alpha.2", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0-alpha.2", path = "../../../frame/treasury" } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../../primitives/application-crypto" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +substrate-test-client = { version = "2.0.0-dev", path = "../../../test-utils/client" } wabt = "0.9.2" [features] diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 034c7c6759e112a7db29ab63cf7c01a81f35340a..ef0cdf445a561c813a3059602e3aef41106f4ec3 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -23,12 +23,13 @@ use node_runtime::{ }; use node_runtime::constants::currency::*; use node_testing::keyring::*; -use sp_core::{Blake2Hasher, NativeOrEncoded, NeverNativeValue}; +use sp_core::{NativeOrEncoded, NeverNativeValue}; use sp_core::storage::well_known_keys; use sp_core::traits::CodeExecutor; use frame_support::Hashable; use sp_state_machine::TestExternalities as CoreTestExternalities; use sc_executor::{NativeExecutor, RuntimeInfo, WasmExecutionMethod, Externalities}; +use sp_runtime::traits::BlakeTwo256; criterion_group!(benches, bench_execute_block); criterion_main!(benches); @@ -54,7 +55,7 @@ fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { node_testing::keyring::sign(xt, VERSION, GENESIS_HASH) } -fn new_test_ext(genesis_config: &GenesisConfig) -> TestExternalities { +fn new_test_ext(genesis_config: &GenesisConfig) -> TestExternalities { let mut test_ext = TestExternalities::new_with_code( COMPACT_CODE, genesis_config.build_storage().unwrap(), @@ -76,7 +77,7 @@ fn construct_block( let extrinsics = extrinsics.into_iter().map(sign).collect::>(); // calculate the header fields that we can. - let extrinsics_root = Layout::::ordered_trie_root( + let extrinsics_root = Layout::::ordered_trie_root( extrinsics.iter().map(Encode::encode) ).to_fixed_bytes() .into(); @@ -90,7 +91,7 @@ fn construct_block( }; // execute the block to get the real header. 
- executor.call::<_, NeverNativeValue, fn() -> _>( + executor.call:: _>( ext, "Core_initialize_block", &header.encode(), @@ -99,7 +100,7 @@ fn construct_block( ).0.unwrap(); for i in extrinsics.iter() { - executor.call::<_, NeverNativeValue, fn() -> _>( + executor.call:: _>( ext, "BlockBuilder_apply_extrinsic", &i.encode(), @@ -108,7 +109,7 @@ fn construct_block( ).0.unwrap(); } - let header = match executor.call::<_, NeverNativeValue, fn() -> _>( + let header = match executor.call:: _>( ext, "BlockBuilder_finalize_block", &[0u8;0], @@ -165,7 +166,7 @@ fn bench_execute_block(c: &mut Criterion) { // Get the runtime version to initialize the runtimes cache. { let mut test_ext = new_test_ext(&genesis_config); - executor.runtime_version(&mut test_ext.ext()); + executor.runtime_version(&mut test_ext.ext()).unwrap(); } let blocks = test_blocks(&genesis_config, &executor); @@ -174,7 +175,7 @@ fn bench_execute_block(c: &mut Criterion) { || new_test_ext(&genesis_config), |test_ext| { for block in blocks.iter() { - executor.call::<_, NeverNativeValue, fn() -> _>( + executor.call:: _>( &mut test_ext.ext(), "Core_execute_block", &block.0, diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 100bdf3fe60ee8a90a557e4b174c6e76cc712b60..1ee0a17c81120ca32a7a9af2d704ce97699ee454 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -21,13 +21,11 @@ use frame_support::{ weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, }; use sp_core::{ - Blake2Hasher, NeverNativeValue, map, - traits::Externalities, - storage::{well_known_keys, Storage}, + NeverNativeValue, map, traits::Externalities, storage::{well_known_keys, Storage}, }; use sp_runtime::{ ApplyExtrinsicResult, Fixed64, - traits::{Hash as HashT, Convert}, + traits::{Hash as HashT, Convert, BlakeTwo256}, transaction_validity::InvalidTransaction, }; use pallet_contracts::ContractAddressFor; @@ -93,7 +91,6 @@ fn changes_trie_block() -> (Vec, Hash) { ) } - /// block 1 and 2 must be created together to ensure transactions are only signed once (since they /// are not guaranteed to be deterministic) and to ensure that the correct state is propagated /// from block1's execution to block2 to derive the correct storage_root. 
@@ -161,10 +158,10 @@ fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { #[test] fn panic_execution_with_foreign_code_gives_error() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { + let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { top: map![ >::hashed_key_for(alice()) => { - (69u128, 0u128, 0u128, 0u128).encode() + (69u128, 0u8, 0u128, 0u128, 0u128).encode() }, >::hashed_key().to_vec() => { 69_u128.encode() @@ -197,10 +194,10 @@ fn panic_execution_with_foreign_code_gives_error() { #[test] fn bad_extrinsic_with_native_equivalent_code_gives_error() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { top: map![ >::hashed_key_for(alice()) => { - (0u32, 69u128, 0u128, 0u128, 0u128).encode() + (0u32, 0u8, 69u128, 0u128, 0u128, 0u128).encode() }, >::hashed_key().to_vec() => { 69_u128.encode() @@ -233,10 +230,10 @@ fn bad_extrinsic_with_native_equivalent_code_gives_error() { #[test] fn successful_execution_with_native_equivalent_code_gives_ok() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { top: map![ >::hashed_key_for(alice()) => { - (0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() }, >::hashed_key().to_vec() => { (111 * DOLLARS).encode() @@ -275,10 +272,10 @@ fn successful_execution_with_native_equivalent_code_gives_ok() { #[test] fn successful_execution_with_foreign_code_gives_ok() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { + let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { top: map![ >::hashed_key_for(alice()) => { - (0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() }, >::hashed_key().to_vec() => { (111 * DOLLARS).encode() @@ -347,7 +344,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(1984800000000)), + event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), topics: vec![], }, EventRecord { @@ -381,13 +378,14 @@ fn full_native_block_import_works() { ).0.unwrap(); t.execute_with(|| { + let fees = transfer_fee(&xt(), fm); assert_eq!( Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - transfer_fee(&xt(), fm), + alice_last_known_balance - 10 * DOLLARS - fees, ); assert_eq!( Balances::total_balance(&bob()), - 179 * DOLLARS - transfer_fee(&xt(), fm), + 179 * DOLLARS - fees, ); let events = vec![ EventRecord { @@ -399,7 +397,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(1984788199392)), + event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), topics: vec![], }, EventRecord { @@ -422,7 +420,7 @@ fn full_native_block_import_works() { }, EventRecord { phase: Phase::ApplyExtrinsic(2), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(1984788199392)), + event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), topics: vec![], }, EventRecord { @@ -699,7 +697,7 @@ fn native_big_block_import_fails_on_fallback() { #[test] fn panic_execution_gives_error() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { + let mut 
t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { top: map![ >::hashed_key().to_vec() => { 0_u128.encode() @@ -730,10 +728,10 @@ fn panic_execution_gives_error() { #[test] fn successful_execution_gives_ok() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { top: map![ >::hashed_key_for(alice()) => { - (0u32, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() }, >::hashed_key().to_vec() => { (111 * DOLLARS).encode() diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index 090d2ee5d4ace1b188641536e0732033fa6d0f34..34f3034208a08eb6d69cdda4929d99a77b4da0a1 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ -17,11 +17,8 @@ use codec::{Encode, Decode}; use frame_support::Hashable; use sp_state_machine::TestExternalities as CoreTestExternalities; -use sp_core::{ - Blake2Hasher, NeverNativeValue, NativeOrEncoded, - traits::CodeExecutor, -}; -use sp_runtime::{ApplyExtrinsicResult, traits::Header as HeaderT}; +use sp_core::{NeverNativeValue, NativeOrEncoded, traits::CodeExecutor}; +use sp_runtime::{ApplyExtrinsicResult, traits::{Header as HeaderT, BlakeTwo256}}; use sc_executor::{NativeExecutor, WasmExecutionMethod}; use sc_executor::error::Result; @@ -67,14 +64,14 @@ pub fn executor_call< R:Decode + Encode + PartialEq, NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe >( - t: &mut TestExternalities, + t: &mut TestExternalities, method: &str, data: &[u8], use_native: bool, native_call: Option, ) -> (Result>, bool) { let mut t = t.ext(); - executor().call::<_, R, NC>( + executor().call::( &mut t, method, data, @@ -83,7 +80,7 @@ pub fn executor_call< ) } -pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { +pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { let mut ext = TestExternalities::new_with_code( code, node_testing::genesis::config(support_changes_trie, Some(code)).build_storage().unwrap(), @@ -97,7 +94,7 @@ pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalitie /// `extrinsics` must be a list of valid extrinsics, i.e. none of the extrinsics for example /// can report `ExhaustResources`. Otherwise, this function panics. pub fn construct_block( - env: &mut TestExternalities, + env: &mut TestExternalities, number: BlockNumber, parent_hash: Hash, extrinsics: Vec, @@ -109,7 +106,7 @@ pub fn construct_block( // calculate the header fields that we can. 
let extrinsics_root = - Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) + Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) .to_fixed_bytes() .into(); diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index ba303a6feb6ff05698f59de15485f65d9beb999c..ea9d740a05ef86e77fa923c8863d55a984cdbca7 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -20,18 +20,12 @@ use frame_support::{ traits::Currency, weights::GetDispatchInfo, }; -use sp_core::{ - Blake2Hasher, NeverNativeValue, map, - storage::Storage, -}; -use sp_runtime::{ - Fixed64, - traits::Convert, -}; +use sp_core::{NeverNativeValue, map, storage::Storage}; +use sp_runtime::{Fixed64, Perbill, traits::{Convert, BlakeTwo256}}; use node_runtime::{ - CheckedExtrinsic, Call, Runtime, Balances, - TransactionPayment, TransactionBaseFee, TransactionByteFee, - WeightFeeCoefficient, constants::currency::*, + CheckedExtrinsic, Call, Runtime, Balances, TransactionPayment, TransactionBaseFee, + TransactionByteFee, WeightFeeCoefficient, + constants::currency::*, }; use node_runtime::impls::LinearWeightToFee; use node_primitives::Balance; @@ -60,12 +54,12 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { GENESIS_HASH.into(), vec![ CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(0, 0))), - function: Call::System(frame_system::Call::fill_block()), + function: Call::System(frame_system::Call::fill_block(Perbill::from_percent(90))), } ] ); @@ -77,8 +71,8 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { block1.1.clone(), vec![ CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), }, CheckedExtrinsic { signed: Some((charlie(), signed_extra(1, 0))), @@ -87,7 +81,11 @@ fn fee_multiplier_increases_and_decreases_on_big_weight() { ] ); - println!("++ Block 1 size: {} / Block 2 size {}", block1.0.encode().len(), block2.0.encode().len()); + println!( + "++ Block 1 size: {} / Block 2 size {}", + block1.0.encode().len(), + block2.0.encode().len(), + ); // execute a big block. executor_call:: _>( @@ -132,13 +130,13 @@ fn transaction_fee_is_correct_ultimate() { // - 1 MILLICENTS in substrate node. // - 1 milli-dot based on current polkadot runtime. 
// (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`) - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { + let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { top: map![ >::hashed_key_for(alice()) => { - (0u32, 100 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() + (0u32, 0u8, 100 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() }, >::hashed_key_for(bob()) => { - (0u32, 10 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() + (0u32, 0u8, 10 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() }, >::hashed_key().to_vec() => { (110 * DOLLARS).encode() diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index b870cf40370f1ad8fde950f607cfc86159e6aefd..1a92aeca6ba77b61597f3f0d19317f67adca2b03 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -167,8 +167,9 @@ fn submitted_transaction_should_be_valid() { // add balance to the account let author = extrinsic.signature.clone().unwrap().0; let address = Indices::lookup(author).unwrap(); - let account = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; - >::insert(&address, (0u32, account)); + let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; + let account = frame_system::AccountInfo { nonce: 0u32, refcount: 0u8, data }; + >::insert(&address, account); // check validity let res = Executive::validate_transaction(extrinsic); diff --git a/bin/node/inspect/Cargo.toml b/bin/node/inspect/Cargo.toml index cbce7e4589698b75e7c0c0c01cb48c896628cef2..022f4d0ca49f5252cedce67ea6750a4c5aae8a18 100644 --- a/bin/node/inspect/Cargo.toml +++ b/bin/node/inspect/Cargo.toml @@ -1,17 +1,20 @@ [package] name = "node-inspect" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0" } +codec = { package = "parity-scale-codec", version = "1.2.0" } derive_more = "0.99" log = "0.4.8" -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-service = { version = "0.8", default-features = false, path = "../../../client/service" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } +sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../../../client/service" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } structopt = "0.3.8" diff --git a/bin/node/inspect/src/cli.rs b/bin/node/inspect/src/cli.rs index 27afcfff919ee276220f20871a0140f6c2aa326c..5d51bd5848f17d308933fdcb69d429504fa8445e 100644 --- a/bin/node/inspect/src/cli.rs +++ b/bin/node/inspect/src/cli.rs @@ -16,12 +16,8 @@ //! Structs to easily compose inspect sub-command for CLI. 
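[Editorial aside, not part of the patch: the executor-test hunks above repeatedly insert an extra `0u8` between the account nonce and the balance fields, matching the `frame_system::AccountInfo { nonce, refcount, data }` construction added in submit_transaction.rs. The following is a minimal sketch of why the flat tuples written into test storage still line up with the new nested layout. It assumes the `codec` alias for `parity-scale-codec` with the `derive` feature, as in the manifests in this patch; the struct definitions here are illustrative stand-ins, not the pallets' actual types.]

use codec::Encode; // `codec` = parity-scale-codec with the "derive" feature (assumption)

// Stand-in for pallet_balances::AccountData (field names as in the pallet; layout illustrative).
#[derive(Encode)]
struct AccountData { free: u128, reserved: u128, misc_frozen: u128, fee_frozen: u128 }

// Stand-in for frame_system::AccountInfo as constructed in submit_transaction.rs above.
#[derive(Encode)]
struct AccountInfo { nonce: u32, refcount: u8, data: AccountData }

fn main() {
    const DOLLARS: u128 = 1_000_000_000_000; // illustrative unit, not the runtime constant

    let info = AccountInfo {
        nonce: 0,
        refcount: 0, // the new field the tests now encode as `0u8`
        data: AccountData { free: 111 * DOLLARS, reserved: 0, misc_frozen: 0, fee_frozen: 0 },
    };

    // SCALE encodes a struct as the concatenation of its fields, so the nested
    // struct encodes byte-for-byte like the flat tuple used in the test genesis storage.
    assert_eq!(info.encode(), (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode());
}

[The patch resumes below.]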
-use std::{ - fmt::Debug, - str::FromStr, -}; -use crate::{Inspector, PrettyPrinter}; -use sc_cli::{ImportParams, SharedParams, error}; +use std::fmt::Debug; +use sc_cli::{ImportParams, SharedParams}; use structopt::StructOpt; /// The `inspect` command used to print decoded chain data. @@ -64,177 +60,3 @@ pub enum InspectSubCmd { input: String, }, } - -impl InspectCmd { - /// Parse CLI arguments and initialize given config. - pub fn init( - &self, - config: &mut sc_service::config::Configuration, - spec_factory: impl FnOnce(&str) -> Result>, String>, - version: &sc_cli::VersionInfo, - ) -> error::Result<()> where - G: sc_service::RuntimeGenesis, - E: sc_service::ChainSpecExtension, - { - sc_cli::init_config(config, &self.shared_params, version, spec_factory)?; - // make sure to configure keystore - sc_cli::fill_config_keystore_in_memory(config)?; - // and all import params (especially pruning that has to match db meta) - sc_cli::fill_import_params( - config, - &self.import_params, - sc_service::Roles::FULL, - self.shared_params.dev, - )?; - Ok(()) - } - - /// Run the inspect command, passing the inspector. - pub fn run( - self, - inspect: Inspector, - ) -> error::Result<()> where - B: sp_runtime::traits::Block, - B::Hash: FromStr, - P: PrettyPrinter, - { - match self.command { - InspectSubCmd::Block { input } => { - let input = input.parse()?; - let res = inspect.block(input) - .map_err(|e| format!("{}", e))?; - println!("{}", res); - Ok(()) - }, - InspectSubCmd::Extrinsic { input } => { - let input = input.parse()?; - let res = inspect.extrinsic(input) - .map_err(|e| format!("{}", e))?; - println!("{}", res); - Ok(()) - }, - } - } -} - - -/// A block to retrieve. -#[derive(Debug, Clone, PartialEq)] -pub enum BlockAddress { - /// Get block by hash. - Hash(Hash), - /// Get block by number. - Number(Number), - /// Raw SCALE-encoded bytes. - Bytes(Vec), -} - -impl FromStr for BlockAddress { - type Err = String; - - fn from_str(s: &str) -> Result { - // try to parse hash first - if let Ok(hash) = s.parse() { - return Ok(Self::Hash(hash)) - } - - // then number - if let Ok(number) = s.parse() { - return Ok(Self::Number(number)) - } - - // then assume it's bytes (hex-encoded) - sp_core::bytes::from_hex(s) - .map(Self::Bytes) - .map_err(|e| format!( - "Given string does not look like hash or number. It could not be parsed as bytes either: {}", - e - )) - } -} - -/// An extrinsic address to decode and print out. -#[derive(Debug, Clone, PartialEq)] -pub enum ExtrinsicAddress { - /// Extrinsic as part of existing block. - Block(BlockAddress, usize), - /// Raw SCALE-encoded extrinsic bytes. - Bytes(Vec), -} - -impl FromStr for ExtrinsicAddress { - type Err = String; - - fn from_str(s: &str) -> Result { - // first try raw bytes - if let Ok(bytes) = sp_core::bytes::from_hex(s).map(Self::Bytes) { - return Ok(bytes) - } - - // split by a bunch of different characters - let mut it = s.split(|c| c == '.' || c == ':' || c == ' '); - let block = it.next() - .expect("First element of split iterator is never empty; qed") - .parse()?; - - let index = it.next() - .ok_or_else(|| format!("Extrinsic index missing: example \"5:0\""))? 
- .parse() - .map_err(|e| format!("Invalid index format: {}", e))?; - - Ok(Self::Block(block, index)) - } -} - - -#[cfg(test)] -mod tests { - use super::*; - use sp_core::hash::H160 as Hash; - - #[test] - fn should_parse_block_strings() { - type BlockAddress = super::BlockAddress; - - let b0 = BlockAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258"); - let b1 = BlockAddress::from_str("1234"); - let b2 = BlockAddress::from_str("0"); - let b3 = BlockAddress::from_str("0x0012345f"); - - - assert_eq!(b0, Ok(BlockAddress::Hash( - "3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap() - ))); - assert_eq!(b1, Ok(BlockAddress::Number(1234))); - assert_eq!(b2, Ok(BlockAddress::Number(0))); - assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); - } - - #[test] - fn should_parse_extrinsic_address() { - type BlockAddress = super::BlockAddress; - type ExtrinsicAddress = super::ExtrinsicAddress; - - let e0 = ExtrinsicAddress::from_str("1234"); - let b0 = ExtrinsicAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258:5"); - let b1 = ExtrinsicAddress::from_str("1234:0"); - let b2 = ExtrinsicAddress::from_str("0 0"); - let b3 = ExtrinsicAddress::from_str("0x0012345f"); - - - assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); - assert_eq!(b0, Ok(ExtrinsicAddress::Block( - BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), - 5 - ))); - assert_eq!(b1, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(1234), - 0 - ))); - assert_eq!(b2, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(0), - 0 - ))); - assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); - } -} diff --git a/bin/node/inspect/src/command.rs b/bin/node/inspect/src/command.rs new file mode 100644 index 0000000000000000000000000000000000000000..71e70e3e44fd4cda6ac398a385d1785c765079b9 --- /dev/null +++ b/bin/node/inspect/src/command.rs @@ -0,0 +1,204 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Command ran by the CLI + +use std::{ + fmt::Debug, + str::FromStr, +}; + +use crate::cli::{InspectCmd, InspectSubCmd}; +use crate::{Inspector, PrettyPrinter}; + +impl InspectCmd { + /// Initialize + pub fn init(&self, version: &sc_cli::VersionInfo) -> sc_cli::Result<()> { + self.shared_params.init(version) + } + + /// Parse CLI arguments and initialize given config. 
+ pub fn update_config( + &self, + mut config: &mut sc_service::config::Configuration, + spec_factory: impl FnOnce(&str) -> Result>, String>, + version: &sc_cli::VersionInfo, + ) -> sc_cli::Result<()> where + G: sc_service::RuntimeGenesis, + E: sc_service::ChainSpecExtension, + { + self.shared_params.update_config(config, spec_factory, version)?; + + // make sure to configure keystore + config.use_in_memory_keystore()?; + + // and all import params (especially pruning that has to match db meta) + self.import_params.update_config( + &mut config, + sc_service::Roles::FULL, + self.shared_params.dev, + )?; + + Ok(()) + } + + /// Run the inspect command, passing the inspector. + pub fn run( + self, + inspect: Inspector, + ) -> sc_cli::Result<()> where + B: sp_runtime::traits::Block, + B::Hash: FromStr, + P: PrettyPrinter, + { + match self.command { + InspectSubCmd::Block { input } => { + let input = input.parse()?; + let res = inspect.block(input) + .map_err(|e| format!("{}", e))?; + println!("{}", res); + Ok(()) + }, + InspectSubCmd::Extrinsic { input } => { + let input = input.parse()?; + let res = inspect.extrinsic(input) + .map_err(|e| format!("{}", e))?; + println!("{}", res); + Ok(()) + }, + } + } +} + +/// A block to retrieve. +#[derive(Debug, Clone, PartialEq)] +pub enum BlockAddress { + /// Get block by hash. + Hash(Hash), + /// Get block by number. + Number(Number), + /// Raw SCALE-encoded bytes. + Bytes(Vec), +} + +impl FromStr for BlockAddress { + type Err = String; + + fn from_str(s: &str) -> Result { + // try to parse hash first + if let Ok(hash) = s.parse() { + return Ok(Self::Hash(hash)) + } + + // then number + if let Ok(number) = s.parse() { + return Ok(Self::Number(number)) + } + + // then assume it's bytes (hex-encoded) + sp_core::bytes::from_hex(s) + .map(Self::Bytes) + .map_err(|e| format!( + "Given string does not look like hash or number. It could not be parsed as bytes either: {}", + e + )) + } +} + +/// An extrinsic address to decode and print out. +#[derive(Debug, Clone, PartialEq)] +pub enum ExtrinsicAddress { + /// Extrinsic as part of existing block. + Block(BlockAddress, usize), + /// Raw SCALE-encoded extrinsic bytes. + Bytes(Vec), +} + +impl FromStr for ExtrinsicAddress { + type Err = String; + + fn from_str(s: &str) -> Result { + // first try raw bytes + if let Ok(bytes) = sp_core::bytes::from_hex(s).map(Self::Bytes) { + return Ok(bytes) + } + + // split by a bunch of different characters + let mut it = s.split(|c| c == '.' || c == ':' || c == ' '); + let block = it.next() + .expect("First element of split iterator is never empty; qed") + .parse()?; + + let index = it.next() + .ok_or_else(|| format!("Extrinsic index missing: example \"5:0\""))? 
+ .parse() + .map_err(|e| format!("Invalid index format: {}", e))?; + + Ok(Self::Block(block, index)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use sp_core::hash::H160 as Hash; + + #[test] + fn should_parse_block_strings() { + type BlockAddress = super::BlockAddress; + + let b0 = BlockAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258"); + let b1 = BlockAddress::from_str("1234"); + let b2 = BlockAddress::from_str("0"); + let b3 = BlockAddress::from_str("0x0012345f"); + + + assert_eq!(b0, Ok(BlockAddress::Hash( + "3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap() + ))); + assert_eq!(b1, Ok(BlockAddress::Number(1234))); + assert_eq!(b2, Ok(BlockAddress::Number(0))); + assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); + } + + #[test] + fn should_parse_extrinsic_address() { + type BlockAddress = super::BlockAddress; + type ExtrinsicAddress = super::ExtrinsicAddress; + + let e0 = ExtrinsicAddress::from_str("1234"); + let b0 = ExtrinsicAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258:5"); + let b1 = ExtrinsicAddress::from_str("1234:0"); + let b2 = ExtrinsicAddress::from_str("0 0"); + let b3 = ExtrinsicAddress::from_str("0x0012345f"); + + + assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); + assert_eq!(b0, Ok(ExtrinsicAddress::Block( + BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), + 5 + ))); + assert_eq!(b1, Ok(ExtrinsicAddress::Block( + BlockAddress::Number(1234), + 0 + ))); + assert_eq!(b2, Ok(ExtrinsicAddress::Block( + BlockAddress::Number(0), + 0 + ))); + assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); + } +} diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index 5c4e18c0a74a643b4625b616b61c284281171e74..cd32f08e9fe59309bc3f4daf33035a763c1b242b 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -23,6 +23,7 @@ #![warn(missing_docs)] pub mod cli; +pub mod command; use std::{ fmt, @@ -37,8 +38,10 @@ use sp_runtime::{ traits::{Block, HashFor, NumberFor, Hash} }; +use command::{BlockAddress, ExtrinsicAddress}; + /// A helper type for a generic block input. -pub type BlockAddressFor = cli::BlockAddress< +pub type BlockAddressFor = BlockAddress< as Hash>::Output, NumberFor >; @@ -148,10 +151,10 @@ impl> Inspector fn get_block(&self, input: BlockAddressFor) -> Result { Ok(match input { - cli::BlockAddress::Bytes(bytes) => { + BlockAddress::Bytes(bytes) => { TBlock::decode(&mut &*bytes)? }, - cli::BlockAddress::Number(number) => { + BlockAddress::Number(number) => { let id = BlockId::number(number); let not_found = format!("Could not find block {:?}", id); let body = self.chain.block_body(&id)? @@ -160,7 +163,7 @@ impl> Inspector .ok_or_else(|| Error::NotFound(not_found.clone()))?; TBlock::new(header, body) }, - cli::BlockAddress::Hash(hash) => { + BlockAddress::Hash(hash) => { let id = BlockId::hash(hash); let not_found = format!("Could not find block {:?}", id); let body = self.chain.block_body(&id)? @@ -175,7 +178,7 @@ impl> Inspector /// Get a pretty-printed extrinsic. 
pub fn extrinsic( &self, - input: cli::ExtrinsicAddress< as Hash>::Output, NumberFor>, + input: ExtrinsicAddress< as Hash>::Output, NumberFor>, ) -> Result { struct ExtrinsicPrinter<'a, A: Block, B>(A::Extrinsic, &'a B); impl<'a, A: Block, B: PrettyPrinter> fmt::Display for ExtrinsicPrinter<'a, A, B> { @@ -185,7 +188,7 @@ impl> Inspector } let ext = match input { - cli::ExtrinsicAddress::Block(block, index) => { + ExtrinsicAddress::Block(block, index) => { let block = self.get_block(block)?; block.extrinsics() .get(index) @@ -194,7 +197,7 @@ impl> Inspector "Could not find extrinsic {} in block {:?}", index, block )))? }, - cli::ExtrinsicAddress::Bytes(bytes) => { + ExtrinsicAddress::Bytes(bytes) => { TBlock::Extrinsic::decode(&mut &*bytes)? } }; diff --git a/bin/node/primitives/Cargo.toml b/bin/node/primitives/Cargo.toml index 5fc6ce8f101bdb807481917dcd65e679ab851cb5..cb271b987dba2e99c725258713d633863ff81e8b 100644 --- a/bin/node/primitives/Cargo.toml +++ b/bin/node/primitives/Cargo.toml @@ -1,16 +1,18 @@ [package] name = "node-primitives" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } [dev-dependencies] -sp-serializer = { version = "2.0.0", path = "../../../primitives/serializer" } +sp-serializer = { version = "2.0.0-alpha.2", path = "../../../primitives/serializer" } pretty_assertions = "0.6.1" [features] diff --git a/bin/node/rpc-client/Cargo.toml b/bin/node/rpc-client/Cargo.toml index 0d8106dceed7fff28e6d7bf1f7006a77362af920..8b37aff291341bd52cc11503fc4d37597769411c 100644 --- a/bin/node/rpc-client/Cargo.toml +++ b/bin/node/rpc-client/Cargo.toml @@ -1,9 +1,11 @@ [package] name = "node-rpc-client" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] env_logger = "0.7.0" @@ -11,5 +13,5 @@ futures = "0.1.29" hyper = "0.12.35" jsonrpc-core-client = { version = "14.0.3", features = ["http", "ws"] } log = "0.4.8" -node-primitives = { version = "2.0.0", path = "../primitives" } -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } +node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } +sc-rpc = { version = "2.0.0-alpha.2", path = "../../../client/rpc" } diff --git a/bin/node/rpc/Cargo.toml b/bin/node/rpc/Cargo.toml index aefc9222f781da744c647e0969a337f9950b5d0d..1155eab304f8ae953207565d13f9434a4d27bb50 100644 --- a/bin/node/rpc/Cargo.toml +++ b/bin/node/rpc/Cargo.toml @@ -1,25 +1,27 @@ [package] name = "node-rpc" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sc-client = { version = "0.8", path = "../../../client/" } +sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } jsonrpc-core = "14.0.3" -node-primitives = { version = "2.0.0", 
path = "../primitives" } -node-runtime = { version = "2.0.0", path = "../runtime" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -pallet-contracts-rpc = { version = "0.8", path = "../../../frame/contracts/rpc/" } -pallet-transaction-payment-rpc = { version = "2.0.0", path = "../../../frame/transaction-payment/rpc/" } -substrate-frame-rpc-system = { version = "2.0.0", path = "../../../utils/frame/rpc/system" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-consensus-babe = { version = "0.8", path = "../../../client/consensus/babe" } -sc-consensus-babe-rpc = { version = "0.8", path = "../../../client/consensus/babe/rpc" } -sp-consensus-babe = { version = "0.8", path = "../../../primitives/consensus/babe" } -sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } -sc-consensus-epochs = { version = "0.8", path = "../../../client/consensus/epochs" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } +node-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +pallet-contracts-rpc = { version = "0.8.0-alpha.2", path = "../../../frame/contracts/rpc/" } +pallet-transaction-payment-rpc = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment/rpc/" } +substrate-frame-rpc-system = { version = "2.0.0-alpha.2", path = "../../../utils/frame/rpc/system" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } +sc-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../client/consensus/babe" } +sc-consensus-babe-rpc = { version = "0.8.0-alpha.2", path = "../../../client/consensus/babe/rpc" } +sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/babe" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../../../client/keystore" } +sc-consensus-epochs = { version = "0.8.0-alpha.2", path = "../../../client/consensus/epochs" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } diff --git a/bin/node/runtime/Cargo.toml b/bin/node/runtime/Cargo.toml index 8156e4d44400cc7ee77b018ff49d090ae86125b9..0d65cf5339536c088f94243cbcb4ac80c17f0f64 100644 --- a/bin/node/runtime/Cargo.toml +++ b/bin/node/runtime/Cargo.toml @@ -1,78 +1,80 @@ [package] name = "node-runtime" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] # third-party dependencies -codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } integer-sqrt = { version = "0.1.2" } rustc-hex = { version = "2.0", optional = true } serde = { version = "1.0.102", optional = true } # primitives -sp-authority-discovery = { version = "2.0.0", default-features = false, path = 
"../../../primitives/authority-discovery" } -sp-consensus-babe = { version = "0.8", default-features = false, path = "../../../primitives/consensus/babe" } -sp-block-builder = { path = "../../../primitives/block-builder", default-features = false} -sp-inherents = { version = "2.0.0", default-features = false, path = "../../../primitives/inherents" } -node-primitives = { version = "2.0.0", default-features = false, path = "../primitives" } -sp-offchain = { version = "2.0.0", default-features = false, path = "../../../primitives/offchain" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-api = { version = "2.0.0", default-features = false, path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../../primitives/staking" } -sp-keyring = { version = "2.0.0", optional = true, path = "../../../primitives/keyring" } -sp-session = { version = "2.0.0", default-features = false, path = "../../../primitives/session" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../../primitives/transaction-pool" } -sp-version = { version = "2.0.0", default-features = false, path = "../../../primitives/version" } +sp-authority-discovery = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/authority-discovery" } +sp-consensus-babe = { version = "0.8.0-alpha.2", default-features = false, path = "../../../primitives/consensus/babe" } +sp-block-builder = { path = "../../../primitives/block-builder", default-features = false, version = "2.0.0-alpha.2"} +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/inherents" } +node-primitives = { version = "2.0.0-alpha.2", default-features = false, path = "../primitives" } +sp-offchain = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/offchain" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/std" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/staking" } +sp-keyring = { version = "2.0.0-alpha.2", optional = true, path = "../../../primitives/keyring" } +sp-session = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/session" } +sp-transaction-pool = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/transaction-pool" } +sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/version" } # frame dependencies -frame-executive = { version = "2.0.0", default-features = false, path = "../../../frame/executive" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../../../frame/benchmarking" } -frame-support = { version = "2.0.0", default-features = false, path = "../../../frame/support" } -frame-system = { version = "2.0.0", default-features = false, path = "../../../frame/system" } -frame-system-rpc-runtime-api = { version = 
"2.0.0", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } -pallet-authority-discovery = { version = "2.0.0", default-features = false, path = "../../../frame/authority-discovery" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../../../frame/authorship" } -pallet-babe = { version = "2.0.0", default-features = false, path = "../../../frame/babe" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../../../frame/balances" } -pallet-collective = { version = "2.0.0", default-features = false, path = "../../../frame/collective" } -pallet-contracts = { version = "2.0.0", default-features = false, path = "../../../frame/contracts" } -pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../../frame/contracts/common/" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } -pallet-democracy = { version = "2.0.0", default-features = false, path = "../../../frame/democracy" } -pallet-elections-phragmen = { version = "2.0.0", default-features = false, path = "../../../frame/elections-phragmen" } -pallet-finality-tracker = { version = "2.0.0", default-features = false, path = "../../../frame/finality-tracker" } -pallet-grandpa = { version = "2.0.0", default-features = false, path = "../../../frame/grandpa" } -pallet-im-online = { version = "2.0.0", default-features = false, path = "../../../frame/im-online" } -pallet-indices = { version = "2.0.0", default-features = false, path = "../../../frame/indices" } -pallet-identity = { version = "2.0.0", default-features = false, path = "../../../frame/identity" } -pallet-membership = { version = "2.0.0", default-features = false, path = "../../../frame/membership" } -pallet-offences = { version = "2.0.0", default-features = false, path = "../../../frame/offences" } -pallet-randomness-collective-flip = { version = "2.0.0", default-features = false, path = "../../../frame/randomness-collective-flip" } -pallet-recovery = { version = "2.0.0", default-features = false, path = "../../../frame/recovery" } -pallet-session = { version = "2.0.0", features = ["historical"], path = "../../../frame/session", default-features = false } -pallet-staking = { version = "2.0.0", features = ["migrate"], path = "../../../frame/staking", default-features = false } -pallet-staking-reward-curve = { version = "2.0.0", path = "../../../frame/staking/reward-curve" } -pallet-sudo = { version = "2.0.0", default-features = false, path = "../../../frame/sudo" } -pallet-society = { version = "2.0.0", default-features = false, path = "../../../frame/society" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../../frame/timestamp" } -pallet-treasury = { version = "2.0.0", default-features = false, path = "../../../frame/treasury" } -pallet-utility = { version = "2.0.0", default-features = false, path = "../../../frame/utility" } -pallet-transaction-payment = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } -pallet-vesting = { version = "2.0.0", default-features = false, path = "../../../frame/vesting" } +frame-executive = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/executive" } +frame-benchmarking = { version = "2.0.0-alpha.2", 
default-features = false, path = "../../../frame/benchmarking", optional = true } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/system" } +frame-system-rpc-runtime-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/system/rpc/runtime-api/" } +pallet-authority-discovery = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/authority-discovery" } +pallet-authorship = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/authorship" } +pallet-babe = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/babe" } +pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/balances" } +pallet-collective = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/collective" } +pallet-contracts = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/contracts" } +pallet-contracts-primitives = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/contracts/common/" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0-alpha.2", default-features = false, path = "../../../frame/contracts/rpc/runtime-api/" } +pallet-democracy = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/democracy" } +pallet-elections-phragmen = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/elections-phragmen" } +pallet-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/finality-tracker" } +pallet-grandpa = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/grandpa" } +pallet-im-online = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/im-online" } +pallet-indices = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/indices" } +pallet-identity = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/identity" } +pallet-membership = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/membership" } +pallet-offences = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/offences" } +pallet-randomness-collective-flip = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/randomness-collective-flip" } +pallet-recovery = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/recovery" } +pallet-session = { version = "2.0.0-alpha.2", features = ["historical"], path = "../../../frame/session", default-features = false } +pallet-staking = { version = "2.0.0-alpha.2", features = ["migrate"], path = "../../../frame/staking", default-features = false } +pallet-staking-reward-curve = { version = "2.0.0-alpha.2", path = "../../../frame/staking/reward-curve" } +pallet-sudo = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/sudo" } +pallet-society = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/society" } +pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/timestamp" } +pallet-treasury = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/treasury" } +pallet-utility = { version = "2.0.0-alpha.2", default-features = false, path = 
"../../../frame/utility" } +pallet-transaction-payment = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/transaction-payment" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/transaction-payment/rpc/runtime-api/" } +pallet-vesting = { version = "2.0.0-alpha.2", default-features = false, path = "../../../frame/vesting" } [build-dependencies] -wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../../primitives/io" } +sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } [features] default = ["std"] @@ -130,3 +132,9 @@ std = [ "pallet-recovery/std", "pallet-vesting/std", ] +runtime-benchmarks = [ + "frame-benchmarking", + "pallet-timestamp/runtime-benchmarks", + "pallet-identity/runtime-benchmarks", + "pallet-balances/runtime-benchmarks", +] diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index b60056e0450b34d8b2e5986bb4b9cf4820f69c57..ae0de952e49fb75d6f6a0ebbf57ac1b546087c9b 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -31,7 +31,8 @@ pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; use sp_api::impl_runtime_apis; use sp_runtime::{ - Permill, Perbill, Percent, ApplyExtrinsicResult, impl_opaque_keys, generic, create_runtime_str, + Permill, Perbill, Percent, ApplyExtrinsicResult, RuntimeString, + impl_opaque_keys, generic, create_runtime_str, }; use sp_runtime::curve::PiecewiseLinear; use sp_runtime::transaction_validity::TransactionValidity; @@ -67,6 +68,7 @@ use impls::{CurrencyToVoteHandler, Author, LinearWeightToFee, TargetedFeeAdjustm /// Constant values used within the runtime. pub mod constants; use constants::{time::*, currency::*}; +use frame_system::Trait; // Make the WASM binary available. #[cfg(feature = "std")] @@ -81,7 +83,7 @@ pub const VERSION: RuntimeVersion = RuntimeVersion { // and set impl_version to 0. If only runtime // implementation changes and behavior does not, then leave spec_version as // is and increment impl_version. - spec_version: 222, + spec_version: 230, impl_version: 0, apis: RUNTIME_API_VERSIONS, }; @@ -131,7 +133,7 @@ impl frame_system::Trait for Runtime { type ModuleToIndex = ModuleToIndex; type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = (Balances, Staking, Contracts, Session, Recovery); + type OnKilledAccount = (); } parameter_types! { @@ -268,6 +270,7 @@ parameter_types! { pub const BondingDuration: pallet_staking::EraIndex = 24 * 28; pub const SlashDeferDuration: pallet_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; + pub const MaxNominatorRewardedPerValidator: u32 = 64; } impl pallet_staking::Trait for Runtime { @@ -285,6 +288,7 @@ impl pallet_staking::Trait for Runtime { type SlashCancelOrigin = pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; type SessionInterface = Self; type RewardCurve = RewardCurve; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; } parameter_types! 
{ @@ -327,11 +331,16 @@ impl pallet_democracy::Trait for Runtime { type Slash = Treasury; } +parameter_types! { + pub const CouncilMotionDuration: BlockNumber = 5 * DAYS; +} + type CouncilCollective = pallet_collective::Instance1; impl pallet_collective::Trait for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; + type MotionDuration = CouncilMotionDuration; } parameter_types! { @@ -357,11 +366,16 @@ impl pallet_elections_phragmen::Trait for Runtime { type TermDuration = TermDuration; } +parameter_types! { + pub const TechnicalMotionDuration: BlockNumber = 5 * DAYS; +} + type TechnicalCollective = pallet_collective::Instance2; impl pallet_collective::Trait for Runtime { type Origin = Origin; type Proposal = Call; type Event = Event; + type MotionDuration = TechnicalMotionDuration; } impl pallet_membership::Trait for Runtime { @@ -370,6 +384,7 @@ impl pallet_membership::Trait for Runtime { type RemoveOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; type SwapOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; type ResetOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type PrimeOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; type MembershipInitialized = TechnicalCommittee; type MembershipChanged = TechnicalCommittee; } @@ -587,10 +602,15 @@ impl pallet_society::Trait for Runtime { type ChallengePeriod = ChallengePeriod; } +parameter_types! { + pub const MinVestedTransfer: Balance = 100 * DOLLARS; +} + impl pallet_vesting::Trait for Runtime { type Event = Event; type Currency = Balances; type BlockNumberToBalance = ConvertInto; + type MinVestedTransfer = MinVestedTransfer; } construct_runtime!( @@ -815,21 +835,44 @@ impl_runtime_apis! { } } + #[cfg(feature = "runtime-benchmarks")] impl frame_benchmarking::Benchmark for Runtime { fn dispatch_benchmark( module: Vec, extrinsic: Vec, - steps: u32, + lowest_range_values: Vec, + highest_range_values: Vec, + steps: Vec, repeat: u32, - ) -> Option> { + ) -> Result, RuntimeString> { use frame_benchmarking::Benchmarking; - match module.as_slice() { - b"pallet-balances" | b"balances" => Balances::run_benchmark(extrinsic, steps, repeat).ok(), - b"pallet-identity" | b"identity" => Identity::run_benchmark(extrinsic, steps, repeat).ok(), - b"pallet-timestamp" | b"timestamp" => Timestamp::run_benchmark(extrinsic, steps, repeat).ok(), - _ => None, - } + let result = match module.as_slice() { + b"pallet-balances" | b"balances" => Balances::run_benchmark( + extrinsic, + lowest_range_values, + highest_range_values, + steps, + repeat, + ), + b"pallet-identity" | b"identity" => Identity::run_benchmark( + extrinsic, + lowest_range_values, + highest_range_values, + steps, + repeat, + ), + b"pallet-timestamp" | b"timestamp" => Timestamp::run_benchmark( + extrinsic, + lowest_range_values, + highest_range_values, + steps, + repeat, + ), + _ => Err("Benchmark not found for this pallet."), + }; + + result.map_err(|e| e.into()) } } } diff --git a/bin/node/testing/Cargo.toml b/bin/node/testing/Cargo.toml index 840b2d0fefcd7258815cfd0ccb79c3dabee15946..4496047b50cff4be14a0435ffa993bbd0d232d29 100644 --- a/bin/node/testing/Cargo.toml +++ b/bin/node/testing/Cargo.toml @@ -1,53 +1,56 @@ [package] name = "node-testing" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] description = "Test utilities for Substrate node." 
edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +publish = true [dependencies] -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -sc-client = { version = "0.8", path = "../../../client/" } -sc-client-db = { version = "0.8", path = "../../../client/db/", features = ["kvdb-rocksdb"] } -sc-client-api = { version = "2.0", path = "../../../client/api/" } -codec = { package = "parity-scale-codec", version = "1.0.0" } -pallet-contracts = { version = "2.0.0", path = "../../../frame/contracts" } -pallet-grandpa = { version = "2.0.0", path = "../../../frame/grandpa" } -pallet-indices = { version = "2.0.0", path = "../../../frame/indices" } -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -node-executor = { version = "2.0.0", path = "../executor" } -node-primitives = { version = "2.0.0", path = "../primitives" } -node-runtime = { version = "2.0.0", path = "../runtime" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -frame-support = { version = "2.0.0", path = "../../../frame/support" } -pallet-session = { version = "2.0.0", path = "../../../frame/session" } -pallet-society = { version = "2.0.0", path = "../../../frame/society" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -pallet-staking = { version = "2.0.0", path = "../../../frame/staking" } -sc-executor = { version = "0.8", path = "../../../client/executor", features = ["wasmtime"] } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } -substrate-test-client = { version = "2.0.0", path = "../../../test-utils/client" } -pallet-timestamp = { version = "2.0.0", path = "../../../frame/timestamp" } -pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } -pallet-treasury = { version = "2.0.0", path = "../../../frame/treasury" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../../../frame/balances" } +sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } +sc-client-db = { version = "0.8.0-alpha.2", path = "../../../client/db/", features = ["kvdb-rocksdb"] } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api/" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +pallet-contracts = { version = "2.0.0-alpha.2", path = "../../../frame/contracts" } +pallet-grandpa = { version = "2.0.0-alpha.2", path = "../../../frame/grandpa" } +pallet-indices = { version = "2.0.0-alpha.2", path = "../../../frame/indices" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } +node-executor = { version = "2.0.0-alpha.2", path = "../executor" } +node-primitives = { version = "2.0.0-alpha.2", path = "../primitives" } +node-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } +frame-support = { version = "2.0.0-alpha.2", path = "../../../frame/support" } +pallet-session = { version = "2.0.0-alpha.2", path = "../../../frame/session" } +pallet-society = { version = "2.0.0-alpha.2", path = "../../../frame/society" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +pallet-staking = { version = "2.0.0-alpha.2", path = 
"../../../frame/staking" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor", features = ["wasmtime"] } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +frame-system = { version = "2.0.0-alpha.2", path = "../../../frame/system" } +substrate-test-client = { version = "2.0.0-dev", path = "../../../test-utils/client" } +pallet-timestamp = { version = "2.0.0-alpha.2", path = "../../../frame/timestamp" } +pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment" } +pallet-treasury = { version = "2.0.0-alpha.2", path = "../../../frame/treasury" } wabt = "0.9.2" -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-finality-tracker = { version = "2.0.0", default-features = false, path = "../../../primitives/finality-tracker" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../../primitives/timestamp" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +sp-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/finality-tracker" } +sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/timestamp" } +sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } log = "0.4.8" -tempdir = "0.3" +tempfile = "3.1.0" fs_extra = "1" [dev-dependencies] criterion = "0.3.0" -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-service = { version = "0.8.0", path = "../../../client/service", features = ["rocksdb"] } +sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } +sc-service = { version = "0.8.0-alpha.2", path = "../../../client/service", features = ["rocksdb"] } [[bench]] name = "import" diff --git a/bin/node/testing/benches/import.rs b/bin/node/testing/benches/import.rs index f8cbbec79d5c2ba11bdccb5d379633f7aae3ea04..79cb71b164371e3bd6bb29c0f58ce2efa28a59af 100644 --- a/bin/node/testing/benches/import.rs +++ b/bin/node/testing/benches/import.rs @@ -28,16 +28,23 @@ //! to much configuring - just block full of randomized transactions. //! 
It is not supposed to measure runtime modules weight correctness +use std::fmt; use node_testing::bench::{BenchDb, Profile}; +use node_primitives::Block; use sp_runtime::generic::BlockId; use criterion::{Criterion, criterion_group, criterion_main}; use sc_client_api::backend::Backend; criterion_group!( name = benches; - config = Criterion::default().sample_size(50).warm_up_time(std::time::Duration::from_secs(20)); + config = Criterion::default().sample_size(20).warm_up_time(std::time::Duration::from_secs(20)); targets = bench_block_import ); +criterion_group!( + name = wasm_size; + config = Criterion::default().sample_size(10); + targets = bench_wasm_size_import +); criterion_group!( name = profile; config = Criterion::default().sample_size(10); @@ -139,3 +146,60 @@ fn profile_block_import(c: &mut Criterion) { }, ); } + +struct Setup { + db: BenchDb, + block: Block, +} + +struct SetupIterator { + current: usize, + finish: usize, + multiplier: usize, +} + +impl SetupIterator { + fn new(current: usize, finish: usize, multiplier: usize) -> Self { + SetupIterator { current, finish, multiplier } + } +} + +impl Iterator for SetupIterator { + type Item = Setup; + + fn next(&mut self) -> Option { + if self.current >= self.finish { return None } + + self.current += 1; + + let size = self.current * self.multiplier; + let mut db = BenchDb::new(size); + let block = db.generate_block(size); + Some(Setup { db, block }) + } +} + +impl fmt::Debug for Setup { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "Setup: {} tx/block", self.block.extrinsics.len()) + } +} + +fn bench_wasm_size_import(c: &mut Criterion) { + sc_cli::init_logger(""); + + c.bench_function_over_inputs("wasm_size_import", + move |bencher, setup| { + bencher.iter_batched( + || { + setup.db.create_context(Profile::Wasm) + }, + |mut context| { + context.import_block(setup.block.clone()); + }, + criterion::BatchSize::PerIteration, + ); + }, + SetupIterator::new(5, 15, 50), + ); +} diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index 5653ba77016462ea9fd17e95c9a982223a5f4a3b..19906dd6a1bf5638bc8c07ae8e0693d00d1108a2 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -81,7 +81,7 @@ pub struct BenchDb { impl Clone for BenchDb { fn clone(&self) -> Self { let keyring = self.keyring.clone(); - let dir = tempdir::TempDir::new("sub-bench").expect("temp dir creation failed"); + let dir = tempfile::tempdir().expect("temp dir creation failed"); let seed_dir = self.directory_guard.0.path(); @@ -120,7 +120,7 @@ impl BenchDb { pub fn new(keyring_length: usize) -> Self { let keyring = BenchKeyring::new(keyring_length); - let dir = tempdir::TempDir::new("sub-bench").expect("temp dir creation failed"); + let dir = tempfile::tempdir().expect("temp dir creation failed"); log::trace!( target: "bench-logistics", "Created seed db at {}", @@ -155,6 +155,7 @@ impl BenchDb { None, None, ExecutionExtensions::new(profile.into_execution_strategies(), None), + None, ).expect("Should not fail"); (client, backend) @@ -357,7 +358,7 @@ impl Profile { } } -struct Guard(tempdir::TempDir); +struct Guard(tempfile::TempDir); impl Guard { fn path(&self) -> &Path { diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 9c163ea6153b2f11bf871883b570d1a7acd05e18..e35059e0c6ffd300cb2941e4322e22c8bbf832ee 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -69,22 +69,21 @@ pub fn config_endowed( }), pallet_session: Some(SessionConfig { keys: 
vec![ - (alice(), to_session_keys( + (dave(), alice(), to_session_keys( &Ed25519Keyring::Alice, &Sr25519Keyring::Alice, )), - (bob(), to_session_keys( + (eve(), bob(), to_session_keys( &Ed25519Keyring::Bob, &Sr25519Keyring::Bob, )), - (charlie(), to_session_keys( + (ferdie(), charlie(), to_session_keys( &Ed25519Keyring::Charlie, &Sr25519Keyring::Charlie, )), ] }), pallet_staking: Some(StakingConfig { - current_era: 0, stakers: vec![ (dave(), alice(), 111 * DOLLARS, pallet_staking::StakerStatus::Validator), (eve(), bob(), 100 * DOLLARS, pallet_staking::StakerStatus::Validator), diff --git a/bin/node/transaction-factory/Cargo.toml b/bin/node/transaction-factory/Cargo.toml index cd40e2f0d939db732339659aeea02f184b062096..8de968baa64f55c7076fb5da014cc518632f93d8 100644 --- a/bin/node/transaction-factory/Cargo.toml +++ b/bin/node/transaction-factory/Cargo.toml @@ -1,20 +1,22 @@ [package] name = "node-transaction-factory" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-client = { version = "0.8", path = "../../../client" } -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } +sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } +sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } +sc-client = { version = "0.8.0-alpha.2", path = "../../../client" } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } log = "0.4.8" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sc-service = { version = "0.8", path = "../../../client/service" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sc-service = { version = "0.8.0-alpha.2", path = "../../../client/service" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } diff --git a/bin/node/transaction-factory/src/lib.rs b/bin/node/transaction-factory/src/lib.rs index 7dafeed40616eaf74997fef2c10624d8e5275910..a4c001145ac7eb6bc99d7fa91740fe4d265650e3 100644 --- a/bin/node/transaction-factory/src/lib.rs +++ b/bin/node/transaction-factory/src/lib.rs @@ -83,7 +83,7 @@ pub fn factory( mut factory_state: RA, client: &Arc>, select_chain: &Sc, -) -> sc_cli::error::Result<()> +) -> sc_cli::Result<()> where Block: BlockT, Exec: sc_client::CallExecutor + Send + Sync + Clone, @@ -97,7 +97,7 @@ where RA: RuntimeAdapter, Block::Hash: From, { - let best_header: Result<::Header, sc_cli::error::Error> = + let best_header: Result<::Header, sc_cli::Error> = 
select_chain.best_chain().map_err(|e| format!("{:?}", e).into()); let mut best_hash = best_header?.hash(); let mut best_block_id = BlockId::::hash(best_hash); diff --git a/bin/utils/chain-spec-builder/Cargo.toml b/bin/utils/chain-spec-builder/Cargo.toml index 8f122a35a02e67a63acdd3bf7f551d779f668d9d..5e51c4358e513d8c6a3ec5164d3aba0520d3f405 100644 --- a/bin/utils/chain-spec-builder/Cargo.toml +++ b/bin/utils/chain-spec-builder/Cargo.toml @@ -1,15 +1,17 @@ [package] name = "chain-spec-builder" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] ansi_term = "0.12.1" -sc-keystore = { version = "2.0.0", path = "../../../client/keystore" } -node-cli = { version = "2.0.0", path = "../../node/cli" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../../../client/keystore" } +node-cli = { version = "2.0.0-alpha.2", path = "../../node/cli" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } rand = "0.7.2" structopt = "0.3.8" diff --git a/bin/utils/subkey/Cargo.toml b/bin/utils/subkey/Cargo.toml index 8267366d7fe02d782086fa05674fe191dcf000f8..252978a3a98cbc143139b9cdfba1169a1ebc6de9 100644 --- a/bin/utils/subkey/Cargo.toml +++ b/bin/utils/subkey/Cargo.toml @@ -1,16 +1,18 @@ [package] name = "subkey" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] futures = "0.1.29" -sp-core = { version = "*", path = "../../../primitives/core" } -node-runtime = { version = "*", path = "../../node/runtime" } -node-primitives = { version = "*", path = "../../node/primitives" } -sp-runtime = { version = "*", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +node-runtime = { version = "2.0.0-alpha.2", path = "../../node/runtime" } +node-primitives = { version = "2.0.0-alpha.2", path = "../../node/primitives" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } rand = "0.7.2" clap = "2.33.0" tiny-bip39 = "0.7" @@ -18,17 +20,17 @@ rustc-hex = "2.0.1" substrate-bip39 = "0.3.1" hex = "0.4.0" hex-literal = "0.2.1" -codec = { package = "parity-scale-codec", version = "1.0.0" } -frame-system = { version = "2.0.0", path = "../../../frame/system" } -pallet-balances = { version = "2.0.0", path = "../../../frame/balances" } -pallet-transaction-payment = { version = "2.0.0", path = "../../../frame/transaction-payment" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +frame-system = { version = "2.0.0-alpha.2", path = "../../../frame/system" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../../../frame/balances" } +pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../../../frame/transaction-payment" } rpassword = "4.0.1" itertools = "0.8.2" derive_more = { version = "0.99.2" } -sc-rpc = { version = "2.0.0", path = "../../../client/rpc" } +sc-rpc = { version = "2.0.0-alpha.2", path = "../../../client/rpc" } jsonrpc-core-client = { version = "14.0.3", features = ["http"] } hyper = "0.12.35" -libp2p = "0.16.0" +libp2p = "0.16.2" serde_json = "1.0" [features] diff --git a/client/Cargo.toml b/client/Cargo.toml index 
c89fe88145d148a6d64e70dcae4adac7eb97021d..61199f04da970bbefb4f494e2d46a5824538d499 100644 --- a/client/Cargo.toml +++ b/client/Cargo.toml @@ -1,41 +1,45 @@ [package] name = "sc-client" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate Client and associated logic." [dependencies] -sc-block-builder = { version = "0.8", path = "block-builder" } -sc-client-api = { version = "2.0.0", path = "api" } -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } -sp-consensus = { version = "0.8", path = "../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0-alpha.2", path = "block-builder" } +sc-client-api = { version = "2.0.0-alpha.2", path = "api" } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +sp-consensus = { version = "0.8.0-alpha.2", path = "../primitives/consensus/common" } derive_more = { version = "0.99.2" } -sc-executor = { version = "0.8", path = "executor" } -sp-externalities = { version = "0.8.0", path = "../primitives/externalities" } +sc-executor = { version = "0.8.0-alpha.2", path = "executor" } +sp-externalities = { version = "0.8.0-alpha.2", path = "../primitives/externalities" } fnv = { version = "1.0.6" } futures = { version = "0.3.1", features = ["compat"] } hash-db = { version = "0.15.2" } hex-literal = { version = "0.2.1" } -sp-inherents = { version = "2.0.0", path = "../primitives/inherents" } -sp-keyring = { version = "2.0.0", path = "../primitives/keyring" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../primitives/inherents" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../primitives/keyring" } kvdb = "0.4.0" log = { version = "0.4.8" } parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../primitives/core" } -sp-std = { version = "2.0.0", path = "../primitives/std" } -sp-version = { version = "2.0.0", path = "../primitives/version" } -sp-api = { version = "2.0.0", path = "../primitives/api" } -sp-runtime = { version = "2.0.0", path = "../primitives/runtime" } -sp-blockchain = { version = "2.0.0", path = "../primitives/blockchain" } -sp-state-machine = { version = "0.8", path = "../primitives/state-machine" } -sc-telemetry = { version = "2.0.0", path = "telemetry" } -sp-trie = { version = "2.0.0", path = "../primitives/trie" } +sp-core = { version = "2.0.0-alpha.2", path = "../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", path = "../primitives/std" } +sp-version = { version = "2.0.0-alpha.2", path = "../primitives/version" } +sp-api = { version = "2.0.0-alpha.2", path = "../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../primitives/runtime" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../primitives/blockchain" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../primitives/state-machine" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "telemetry" } +sp-trie = { version = "2.0.0-alpha.2", path = "../primitives/trie" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-alpha.2", path = "../utils/prometheus" } tracing = "0.1.10" [dev-dependencies] env_logger = "0.7.0" tempfile = "3.1.0" -substrate-test-runtime-client = { version = "2.0.0", path = "../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../test-utils/runtime/client" } 
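For reference, the `wasm_size` Criterion group added in the import benchmark hunk above measures block import with `bencher.iter_batched` and `BatchSize::PerIteration`, so the expensive `BenchDb` context is rebuilt in the setup closure and never counted in the timing. A minimal standalone sketch of that same pattern, with a toy workload standing in for block import (the benchmark name and workload below are illustrative, not part of this patch):

use criterion::{criterion_group, criterion_main, BatchSize, Criterion};

fn bench_sort(c: &mut Criterion) {
    c.bench_function("sort_reversed_vec", |bencher| {
        bencher.iter_batched(
            // Setup runs outside the measured region, once per iteration.
            || (0..10_000u32).rev().collect::<Vec<_>>(),
            // Only this closure is timed, mirroring how the block import is measured above.
            |mut v| v.sort(),
            BatchSize::PerIteration,
        );
    });
}

criterion_group!(
    name = benches;
    config = Criterion::default().sample_size(20);
    targets = bench_sort
);
criterion_main!(benches);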
kvdb-memorydb = "0.4.0" -sp-panic-handler = { version = "2.0.0", path = "../primitives/panic-handler" } +sp-panic-handler = { version = "2.0.0-alpha.2", path = "../primitives/panic-handler" } diff --git a/client/api/Cargo.toml b/client/api/Cargo.toml index 27a40c4d94cfbd70a77bfeac94dc770c264b7268..7ceb12eaf60bb780bca061539852bc1a42c8819f 100644 --- a/client/api/Cargo.toml +++ b/client/api/Cargo.toml @@ -1,35 +1,41 @@ [package] name = "sc-client-api" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate client interfaces." +documentation = "https://docs.rs/sc-client-api" + [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } derive_more = { version = "0.99.2" } -sc-executor = { version = "0.8", path = "../executor" } -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } +sc-executor = { version = "0.8.0-alpha.2", path = "../executor" } +sp-externalities = { version = "0.8.0-alpha.2", path = "../../primitives/externalities" } fnv = { version = "1.0.6" } futures = { version = "0.3.1" } hash-db = { version = "0.15.2", default-features = false } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } hex-literal = { version = "0.2.1" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } kvdb = "0.4.0" log = { version = "0.4.8" } parking_lot = "0.10.0" -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/version" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } 
+sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } +sp-trie = { version = "2.0.0-alpha.2", path = "../../primitives/trie" } +sp-storage = { version = "2.0.0-alpha.2", path = "../../primitives/storage" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } [dev-dependencies] -sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } +sp-test-primitives = { version = "2.0.0-dev", path = "../../primitives/test-primitives" } diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index a389af5671b327cb21f7454d31848fe85d645e15..d10e62cc54920e56509ddbb06a51f38296b417c1 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -21,11 +21,12 @@ use std::collections::HashMap; use sp_core::ChangesTrieConfigurationRange; use sp_core::offchain::OffchainStorage; use sp_runtime::{generic::BlockId, Justification, Storage}; -use sp_runtime::traits::{Block as BlockT, NumberFor, HasherFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor, HashFor}; use sp_state_machine::{ ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, StorageCollection, ChildStorageCollection, }; +use sp_storage::{StorageData, StorageKey, ChildInfo}; use crate::{ blockchain::{ Backend as BlockchainBackend, well_known_cache_keys @@ -38,12 +39,13 @@ use sp_consensus::BlockOrigin; use parking_lot::RwLock; pub use sp_state_machine::Backend as StateBackend; +use std::marker::PhantomData; /// Extracts the state backend type for the given backend. pub type StateBackendFor = >::State; /// Extracts the transaction for the given state backend. -pub type TransactionForSB = >>::Transaction; +pub type TransactionForSB = >>::Transaction; /// Extracts the transaction for the given backend. pub type TransactionFor = TransactionForSB, Block>; @@ -111,7 +113,7 @@ impl NewBlockState { /// Keeps hold if the inserted block state and data. pub trait BlockImportOperation { /// Associated state backend type. - type State: StateBackend>; + type State: StateBackend>; /// Returns pending state. /// @@ -149,7 +151,7 @@ pub trait BlockImportOperation { /// Inject changes trie data into the database. fn update_changes_trie( &mut self, - update: ChangesTrieTransaction, NumberFor>, + update: ChangesTrieTransaction, NumberFor>, ) -> sp_blockchain::Result<()>; /// Insert auxiliary keys. @@ -169,6 +171,15 @@ pub trait BlockImportOperation { fn mark_head(&mut self, id: BlockId) -> sp_blockchain::Result<()>; } +/// Interface for performing operations on the backend. +pub trait LockImportRun> { + /// Lock the import lock, and run operations inside. + fn lock_import_and_run(&self, f: F) -> Result + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From; +} + /// Finalize Facilities pub trait Finalizer> { /// Mark all blocks up to given as finalized in operation. @@ -228,6 +239,123 @@ pub trait AuxStore { fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>>; } +/// An `Iterator` that iterates keys in a given block under a prefix. 
+pub struct KeyIterator<'a, State, Block> { + state: State, + prefix: Option<&'a StorageKey>, + current_key: Vec, + _phantom: PhantomData, +} + +impl <'a, State, Block> KeyIterator<'a, State, Block> { + /// create a KeyIterator instance + pub fn new(state: State, prefix: Option<&'a StorageKey>, current_key: Vec) -> Self { + Self { + state, + prefix, + current_key, + _phantom: PhantomData, + } + } +} + +impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where + Block: BlockT, + State: StateBackend>, +{ + type Item = StorageKey; + + fn next(&mut self) -> Option { + let next_key = self.state + .next_storage_key(&self.current_key) + .ok() + .flatten()?; + // this terminates the iterator the first time it fails. + if let Some(prefix) = self.prefix { + if !next_key.starts_with(&prefix.0[..]) { + return None; + } + } + self.current_key = next_key.clone(); + Some(StorageKey(next_key)) + } +} +/// Provides acess to storage primitives +pub trait StorageProvider> { + /// Given a `BlockId` and a key, return the value under the key in that block. + fn storage(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result>; + + /// Given a `BlockId` and a key prefix, return the matching storage keys in that block. + fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result>; + + /// Given a `BlockId` and a key, return the value under the hash in that block. + fn storage_hash(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result>; + + /// Given a `BlockId` and a key prefix, return the matching child storage keys and values in that block. + fn storage_pairs( + &self, + id: &BlockId, + key_prefix: &StorageKey + ) -> sp_blockchain::Result>; + + /// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in that block. + fn storage_keys_iter<'a>( + &self, + id: &BlockId, + prefix: Option<&'a StorageKey>, + start_key: Option<&StorageKey> + ) -> sp_blockchain::Result>; + + /// Given a `BlockId`, a key and a child storage key, return the value under the key in that block. + fn child_storage( + &self, + id: &BlockId, + storage_key: &StorageKey, + child_info: ChildInfo, + key: &StorageKey + ) -> sp_blockchain::Result>; + + /// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys. + fn child_storage_keys( + &self, + id: &BlockId, + child_storage_key: &StorageKey, + child_info: ChildInfo, + key_prefix: &StorageKey + ) -> sp_blockchain::Result>; + + /// Given a `BlockId`, a key and a child storage key, return the hash under the key in that block. + fn child_storage_hash( + &self, + id: &BlockId, + storage_key: &StorageKey, + child_info: ChildInfo, + key: &StorageKey + ) -> sp_blockchain::Result>; + + /// Get longest range within [first; last] that is possible to use in `key_changes` + /// and `key_changes_proof` calls. + /// Range could be shortened from the beginning if some changes tries have been pruned. + /// Returns Ok(None) if changes tries are not supported. + fn max_key_changes_range( + &self, + first: NumberFor, + last: BlockId, + ) -> sp_blockchain::Result, BlockId)>>; + + /// Get pairs of (block, extrinsic) where key has been changed at given blocks range. + /// Works only for runtimes that are supporting changes tries. + /// + /// Changes are returned in descending order (i.e. last block comes first). 
+ fn key_changes( + &self, + first: NumberFor, + last: BlockId, + storage_key: Option<&StorageKey>, + key: &StorageKey + ) -> sp_blockchain::Result, u32)>>; +} + /// Client backend. /// /// Manages the data layer. @@ -244,7 +372,7 @@ pub trait Backend: AuxStore + Send + Sync { /// Associated blockchain backend type. type Blockchain: BlockchainBackend; /// Associated state backend type. - type State: StateBackend> + Send; + type State: StateBackend> + Send; /// Offchain workers local storage. type OffchainStorage: OffchainStorage; @@ -292,11 +420,6 @@ pub trait Backend: AuxStore + Send + Sync { /// Returns state backend with post-state of given block. fn state_at(&self, block: BlockId) -> sp_blockchain::Result; - /// Destroy state and save any useful data, such as cache. - fn destroy_state(&self, _state: Self::State) -> sp_blockchain::Result<()> { - Ok(()) - } - /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set /// it will attempt to revert past any finalized block, this is unsafe and /// can potentially leave the node in an inconsistent state. @@ -335,10 +458,10 @@ pub trait Backend: AuxStore + Send + Sync { /// Changes trie storage that supports pruning. pub trait PrunableStateChangesTrieStorage: - StateChangesTrieStorage, NumberFor> + StateChangesTrieStorage, NumberFor> { /// Get reference to StateChangesTrieStorage. - fn storage(&self) -> &dyn StateChangesTrieStorage, NumberFor>; + fn storage(&self) -> &dyn StateChangesTrieStorage, NumberFor>; /// Get configuration at given block. fn configuration_at(&self, at: &BlockId) -> sp_blockchain::Result< ChangesTrieConfigurationRange, Block::Hash> @@ -368,7 +491,7 @@ pub trait RemoteBackend: Backend { pub fn changes_tries_state_at_block<'a, Block: BlockT>( block: &BlockId, maybe_storage: Option<&'a dyn PrunableStateChangesTrieStorage>, -) -> sp_blockchain::Result, NumberFor>>> { +) -> sp_blockchain::Result, NumberFor>>> { let storage = match maybe_storage { Some(storage) => storage, None => return Ok(None), diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index aff38a26aadd6e4e784471ddd4ba0efe9cb17f36..f39d7971578967724c638f21842e2fa82aed55f5 100644 --- a/client/api/src/call_executor.rs +++ b/client/api/src/call_executor.rs @@ -19,7 +19,7 @@ use std::{panic::UnwindSafe, result, cell::RefCell}; use codec::{Encode, Decode}; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HasherFor}, + generic::BlockId, traits::{Block as BlockT, HashFor}, }; use sp_state_machine::{ OverlayedChanges, ExecutionManager, ExecutionStrategy, StorageProof, @@ -29,6 +29,18 @@ use sp_externalities::Extensions; use sp_core::NativeOrEncoded; use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use crate::execution_extensions::ExecutionExtensions; + +/// Executor Provider +pub trait ExecutorProvider { + /// executor instance + type Executor: CallExecutor; + /// Get call executor reference. + fn executor(&self) -> &Self::Executor; + + /// Get a reference to the execution extensions. + fn execution_extensions(&self) -> &ExecutionExtensions; +} /// Method call executor. pub trait CallExecutor { @@ -89,7 +101,7 @@ pub trait CallExecutor { /// Execute a call to a contract on top of given state, gathering execution proof. /// /// No changes are made. - fn prove_at_state>>( + fn prove_at_state>>( &self, mut state: S, overlay: &mut OverlayedChanges, @@ -107,9 +119,9 @@ pub trait CallExecutor { /// Execute a call to a contract on top of given trie state, gathering execution proof. 
/// /// No changes are made. - fn prove_at_trie_state>>( + fn prove_at_trie_state>>( &self, - trie_state: &sp_state_machine::TrieBackend>, + trie_state: &sp_state_machine::TrieBackend>, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8] diff --git a/client/api/src/client.rs b/client/api/src/client.rs index 7503ce4a79e4627eb9fac7527e3670cfb35c23f3..22503732be4e98bddc150cad0bd08ea142e6d772 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -21,7 +21,7 @@ use futures::channel::mpsc; use sp_core::storage::StorageKey; use sp_runtime::{ traits::{Block as BlockT, NumberFor}, - generic::BlockId + generic::{BlockId, SignedBlock} }; use sp_consensus::BlockOrigin; @@ -76,9 +76,13 @@ pub trait BlockchainEvents { /// Fetch block body by ID. pub trait BlockBody { /// Get block body by ID. Returns `None` if the body is not stored. - fn block_body(&self, + fn block_body( + &self, id: &BlockId ) -> sp_blockchain::Result::Extrinsic>>>; + + /// Get full block by id. + fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; } /// Provide a list of potential uncle headers for a given block. @@ -97,13 +101,56 @@ pub struct ClientInfo { pub usage: Option, } +/// A wrapper to store the size of some memory. +#[derive(Default, Clone, Debug, Copy)] +pub struct MemorySize(usize); + +impl MemorySize { + /// Creates `Self` from the given `bytes` size. + pub fn from_bytes(bytes: usize) -> Self { + Self(bytes) + } + + /// Returns the memory size as bytes. + pub fn as_bytes(self) -> usize { + self.0 + } +} + +impl fmt::Display for MemorySize { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.0 < 1024 { + write!(f, "{} bytes", self.0) + } else if self.0 < 1024 * 1024 { + write!(f, "{:.2} KiB", self.0 as f64 / 1024f64) + } else if self.0 < 1024 * 1024 * 1024 { + write!(f, "{:.2} MiB", self.0 as f64 / (1024f64 * 1024f64)) + } else { + write!(f, "{:.2} GiB", self.0 as f64 / (1024f64 * 1024f64 * 1024f64)) + } + } +} + +/// Memory statistics for state db. +#[derive(Default, Clone, Debug)] +pub struct StateDbMemoryInfo { + /// Memory usage of the non-canonical overlay + pub non_canonical: MemorySize, + /// Memory usage of the pruning window. + pub pruning: Option, + /// Memory usage of the pinned blocks. + pub pinned: MemorySize, +} + /// Memory statistics for client instance. #[derive(Default, Clone, Debug)] pub struct MemoryInfo { /// Size of state cache. - pub state_cache: usize, + pub state_cache: MemorySize, /// Size of backend database cache. - pub database_cache: usize, + pub database_cache: MemorySize, + /// Size of the state db. + pub state_db: StateDbMemoryInfo, } /// I/O statistics for client instance. 
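For reference, the `MemorySize` wrapper added to client/api/src/client.rs above stores a raw byte count and formats itself as bytes, KiB, MiB or GiB. A minimal usage sketch, assuming the type is reachable as `sc_client_api::MemorySize` via the crate's `pub use client::*` re-export:

use sc_client_api::MemorySize;

fn main() {
    let state_cache = MemorySize::from_bytes(2 * 1024 * 1024); // 2 MiB
    let db_cache = MemorySize::from_bytes(512);                // 512 bytes
    // Display picks a human-readable unit: "2.00 MiB" and "512 bytes".
    println!("caches: ({} state, {} db overlay)", state_cache, db_cache);
    assert_eq!(state_cache.as_bytes(), 2 * 1024 * 1024);
}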
@@ -144,10 +191,16 @@ pub struct UsageInfo { impl fmt::Display for UsageInfo { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, - "caches: ({} state, {} db overlay), i/o: ({} tx, {} write, {} read, {} avg tx, {}/{} key cache reads/total, {} key writes)", + write!( + f, + "caches: ({} state, {} db overlay), \ + state db: ({} non-canonical, {} pruning, {} pinned), \ + i/o: ({} tx, {} write, {} read, {} avg tx, {}/{} key cache reads/total, {} key writes)", self.memory.state_cache, self.memory.database_cache, + self.memory.state_db.non_canonical, + self.memory.state_db.pruning.unwrap_or_default(), + self.memory.state_db.pinned, self.io.transactions, self.io.bytes_written, self.io.bytes_read, diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index 69d0c94ac2b8bb9ef0bd71661c76a922b8fba977..66f51e75c793782123757be13a24c23ede1f96fa 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -23,6 +23,7 @@ pub mod client; pub mod execution_extensions; pub mod light; pub mod notifications; +pub mod proof_provider; pub use sp_blockchain as blockchain; pub use backend::*; @@ -31,6 +32,7 @@ pub use call_executor::*; pub use client::*; pub use light::*; pub use notifications::*; +pub use proof_provider::*; pub use sp_state_machine::{StorageProof, ExecutionStrategy}; diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs new file mode 100644 index 0000000000000000000000000000000000000000..2d9876f7ad278dd7067199f9090122d5c9c073ab --- /dev/null +++ b/client/api/src/proof_provider.rs @@ -0,0 +1,71 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . +//! Proof utilities +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT}, +}; +use crate::{StorageProof, ChangesProof}; +use sp_storage::{ChildInfo, StorageKey}; + +/// Interface for providing block proving utilities. +pub trait ProofProvider { + /// Reads storage value at a given block + key, returning read proof. + fn read_proof( + &self, + id: &BlockId, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result; + + /// Reads child storage value at a given block + storage_key + key, returning + /// read proof. + fn read_child_proof( + &self, + id: &BlockId, + storage_key: &[u8], + child_info: ChildInfo, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result; + + /// Execute a call to a contract on top of state in a block of given hash + /// AND returning execution proof. + /// + /// No changes are made. + fn execution_proof( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + ) -> sp_blockchain::Result<(Vec, StorageProof)>; + /// Reads given header and generates CHT-based header proof. + fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)>; + + /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. 
+ /// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using + /// changes tries from ascendants of this block, we should provide proofs for changes tries roots + /// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants + /// of this block. + /// Works only for runtimes that are supporting changes tries. + fn key_changes_proof( + &self, + first: Block::Hash, + last: Block::Hash, + min: Block::Hash, + max: Block::Hash, + storage_key: Option<&StorageKey>, + key: &StorageKey, + ) -> sp_blockchain::Result>; +} diff --git a/client/authority-discovery/Cargo.toml b/client/authority-discovery/Cargo.toml index 248304d137018b0d0fa6e86490ac1f4e6016db72..0c3b739bb9deb9ca582d88e49821ad15cae593cf 100644 --- a/client/authority-discovery/Cargo.toml +++ b/client/authority-discovery/Cargo.toml @@ -1,36 +1,39 @@ [package] name = "sc-authority-discovery" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate authority discovery." [build-dependencies] prost-build = "0.6.1" [dependencies] -bytes = "0.4.12" -codec = { package = "parity-scale-codec", default-features = false, version = "1.0.3" } +bytes = "0.5.0" +codec = { package = "parity-scale-codec", default-features = false, version = "1.2.0" } derive_more = "0.99.2" futures = "0.3.1" futures-timer = "3.0.1" -libp2p = { version = "0.16.0", default-features = false, features = ["secp256k1", "libp2p-websocket"] } +libp2p = { version = "0.16.2", default-features = false, features = ["secp256k1", "libp2p-websocket"] } log = "0.4.8" prost = "0.6.1" rand = "0.7.2" -sc-client-api = { version = "2.0.0", path = "../api" } -sc-keystore = { version = "2.0.0", path = "../keystore" } -sc-network = { version = "0.8", path = "../network" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } serde_json = "1.0.41" -sp-authority-discovery = { version = "2.0.0", path = "../../primitives/authority-discovery" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } +sp-authority-discovery = { version = "2.0.0-alpha.2", path = "../../primitives/authority-discovery" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } [dev-dependencies] env_logger = "0.7.0" quickcheck = "0.9.0" -sc-peerset = { version = "2.0.0", path = "../peerset" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client"} +sc-peerset = { version = "2.0.0-alpha.2", path = "../peerset" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client"} diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 6260ac9a85b12ec0af93227048e8279438161c11..92dcc264502f03612463b44d128148a2aeddebe3 
100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -60,7 +60,6 @@ use libp2p::Multiaddr; use log::{debug, error, log_enabled, warn}; use prost::Message; use sc_client_api::blockchain::HeaderBackend; -use sc_network::specialization::NetworkSpecialization; use sc_network::{DhtEvent, ExHashT, NetworkStateInfo}; use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; use sp_core::crypto::{key_types, Pair}; @@ -477,10 +476,9 @@ pub trait NetworkProvider: NetworkStateInfo { fn get_value(&self, key: &libp2p::kad::record::Key); } -impl NetworkProvider for sc_network::NetworkService +impl NetworkProvider for sc_network::NetworkService where B: BlockT + 'static, - S: NetworkSpecialization, H: ExHashT, { fn set_priority_group( diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index 77ed6a1d948e2897ea331a5727d49e1d7e416200..55ee6c7aac13ace754865a6ae32de814a29eb4a3 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -215,7 +215,7 @@ impl ApiExt for RuntimeApi { fn into_storage_changes( &self, _: &Self::StateBackend, - _: Option<&sp_api::ChangesTrieState, sp_api::NumberFor>>, + _: Option<&sp_api::ChangesTrieState, sp_api::NumberFor>>, _: ::Hash, ) -> std::result::Result, String> where Self: Sized diff --git a/client/basic-authorship/Cargo.toml b/client/basic-authorship/Cargo.toml index 6a013f7a74fce889d329bc930da20c8aca02cbd9..7503221f6556072b8b45d69b5b1f420397638a9c 100644 --- a/client/basic-authorship/Cargo.toml +++ b/client/basic-authorship/Cargo.toml @@ -1,28 +1,30 @@ [package] name = "sc-basic-authorship" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Basic implementation of block-authoring logic." 
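For reference, the new `ProofProvider` trait in client/api/src/proof_provider.rs above lets callers request storage read proofs at a given block. A rough call-site sketch follows; the generic bounds and the iterator item type (`&[u8]`) are assumptions, since generic parameters are abbreviated in this rendering of the patch:

use sp_runtime::{generic::BlockId, traits::Block as BlockT};
use sc_client_api::{ProofProvider, StorageProof};

// Ask the client to prove the value of the well-known ":code" key at block `at`.
fn prove_code_key<Block, C>(client: &C, at: Block::Hash) -> sp_blockchain::Result<StorageProof>
where
    Block: BlockT,
    C: ProofProvider<Block>,
{
    let keys: Vec<Vec<u8>> = vec![b":code".to_vec()];
    client.read_proof(&BlockId::Hash(at), &mut keys.iter().map(|k| &k[..]))
}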
[dependencies] log = "0.4.8" futures = "0.3.1" -codec = { package = "parity-scale-codec", version = "1.0.0" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-client = { version = "0.8", path = "../" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sc-block-builder = { version = "0.8", path = "../block-builder" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../primitives/inherents" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } +sc-block-builder = { version = "0.8.0-alpha.2", path = "../block-builder" } tokio-executor = { version = "0.2.0-alpha.6", features = ["blocking"] } [dev-dependencies] -sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../client/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } parking_lot = "0.10.0" diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index a99453544e5f63e3a6a6e5d62845b465d4bc86d8..41216c2b5442116ca8eac2bcbabeaafbba940254 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -19,48 +19,59 @@ // FIXME #1021 move this into sp-consensus use std::{time, sync::Arc}; -use sc_client_api::{CallExecutor, backend}; -use sc_client::Client as SubstrateClient; +use sc_client_api::backend; use codec::Decode; use sp_consensus::{evaluation, Proposal, RecordProof}; use sp_inherents::InherentData; use log::{error, info, debug, trace}; use sp_core::ExecutionContext; use sp_runtime::{ - traits::{Block as BlockT, Hash as HashT, Header as HeaderT, DigestFor, BlakeTwo256}, generic::BlockId, + traits::{Block as BlockT, Hash as HashT, Header as HeaderT, DigestFor, BlakeTwo256}, }; use sp_transaction_pool::{TransactionPool, InPoolTransaction}; use sc_telemetry::{telemetry, CONSENSUS_INFO}; -use sc_block_builder::BlockBuilderApi; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sp_api::{ProvideRuntimeApi, ApiExt}; use futures::prelude::*; +use sp_blockchain::{HeaderBackend, ApplyExtrinsicFailed}; +use std::marker::PhantomData; /// Proposer factory. 
-pub struct ProposerFactory where A: TransactionPool { +pub struct ProposerFactory { /// The client instance. - pub client: Arc, + client: Arc, /// The transaction pool. - pub transaction_pool: Arc, + transaction_pool: Arc, + /// phantom member to pin the `Backend` type. + _phantom: PhantomData, +} + +impl ProposerFactory { + pub fn new(client: Arc, transaction_pool: Arc) -> Self { + ProposerFactory { + client, + transaction_pool, + _phantom: PhantomData, + } + } } -impl ProposerFactory, A> +impl ProposerFactory where A: TransactionPool + 'static, B: backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + Clone + 'static, Block: BlockT, - RA: Send + Sync + 'static, - SubstrateClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - BlockBuilderApi + - ApiExt>, + C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi + + Send + Sync + 'static, + C::Api: ApiExt> + + BlockBuilderApi, { pub fn init_with_now( &mut self, parent_header: &::Header, now: Box time::Instant + Send + Sync>, - ) -> Proposer, A> { + ) -> Proposer { let parent_hash = parent_header.hash(); let id = BlockId::hash(parent_hash); @@ -75,6 +86,7 @@ impl ProposerFactory, A> parent_number: *parent_header.number(), transaction_pool: self.transaction_pool.clone(), now, + _phantom: PhantomData, }), }; @@ -82,21 +94,19 @@ impl ProposerFactory, A> } } -impl sp_consensus::Environment for - ProposerFactory, A> +impl sp_consensus::Environment for + ProposerFactory where A: TransactionPool + 'static, B: backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + Clone + 'static, Block: BlockT, - RA: Send + Sync + 'static, - SubstrateClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - BlockBuilderApi + - ApiExt>, + C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi + + Send + Sync + 'static, + C::Api: ApiExt> + + BlockBuilderApi, { type CreateProposer = future::Ready>; - type Proposer = Proposer, A>; + type Proposer = Proposer; type Error = sp_blockchain::Error; fn init( @@ -108,32 +118,31 @@ impl sp_consensus::Environment for } /// The proposer logic. -pub struct Proposer { - inner: Arc>, +pub struct Proposer { + inner: Arc>, } /// Proposer inner, to wrap parameters under Arc. 
-struct ProposerInner { +struct ProposerInner { client: Arc, parent_hash: ::Hash, parent_id: BlockId, parent_number: <::Header as HeaderT>::Number, transaction_pool: Arc, now: Box time::Instant + Send + Sync>, + _phantom: PhantomData, } -impl sp_consensus::Proposer for - Proposer, A> +impl sp_consensus::Proposer for + Proposer where A: TransactionPool + 'static, B: backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + Clone + 'static, Block: BlockT, - RA: Send + Sync + 'static, - SubstrateClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - BlockBuilderApi + - ApiExt>, + C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi + + Send + Sync + 'static, + C::Api: ApiExt> + + BlockBuilderApi, { type Transaction = backend::TransactionFor; type Proposal = tokio_executor::blocking::Blocking< @@ -157,16 +166,15 @@ impl sp_consensus::Proposer for } } -impl ProposerInner, A> where - A: TransactionPool, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + Clone + 'static, - Block: BlockT, - RA: Send + Sync + 'static, - SubstrateClient: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - BlockBuilderApi + - ApiExt>, +impl ProposerInner + where + A: TransactionPool, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi + + Send + Sync + 'static, + C::Api: ApiExt> + + BlockBuilderApi, { fn propose_with( &self, @@ -222,7 +230,7 @@ impl ProposerInner, Ok(()) => { debug!("[{:?}] Pushed to the block.", pending_tx_hash); } - Err(sp_blockchain::Error::ApplyExtrinsicFailed(sp_blockchain::ApplyExtrinsicFailed::Validity(e))) + Err(sp_blockchain::Error::ApplyExtrinsicFailed(ApplyExtrinsicFailed::Validity(e))) if e.exhausted_resources() => { if is_first { debug!("[{:?}] Invalid transaction: FullBlock on empty block", pending_tx_hash); @@ -238,6 +246,13 @@ impl ProposerInner, break; } } + Err(e) if skipped > 0 => { + trace!( + "[{:?}] Ignoring invalid transaction when skipping: {}", + pending_tx_hash, + e + ); + } Err(e) => { debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); unqueue_invalid.push(pending_tx_hash); @@ -284,10 +299,10 @@ mod tests { use super::*; use parking_lot::Mutex; - use sp_consensus::Proposer; + use sp_consensus::{BlockOrigin, Proposer}; use substrate_test_runtime_client::{ - runtime::{Extrinsic, Transfer}, AccountKeyring, DefaultTestClientBuilderExt, - TestClientBuilderExt, + prelude::*, + runtime::{Extrinsic, Transfer}, }; use sc_transaction_pool::{BasicPool, FullChainApi}; use sp_api::Core; @@ -315,10 +330,7 @@ mod tests { txpool.submit_at(&BlockId::number(0), vec![extrinsic(0), extrinsic(1)]) ).unwrap(); - let mut proposer_factory = ProposerFactory { - client: client.clone(), - transaction_pool: txpool.clone(), - }; + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); let cell = Mutex::new(time::Instant::now()); let mut proposer = proposer_factory.init_with_now( @@ -359,10 +371,7 @@ mod tests { txpool.submit_at(&BlockId::number(0), vec![extrinsic(0)]), ).unwrap(); - let mut proposer_factory = ProposerFactory { - client: client.clone(), - transaction_pool: txpool.clone(), - }; + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); let mut proposer = proposer_factory.init_with_now( &client.header(&block_id).unwrap().unwrap(), @@ -393,4 +402,73 @@ mod tests { storage_changes.transaction_storage_root, ); } + + #[test] + fn should_not_remove_invalid_transactions_when_skipping() { + 
// given + let mut client = Arc::new(substrate_test_runtime_client::new()); + let txpool = Arc::new( + BasicPool::new(Default::default(), Arc::new(FullChainApi::new(client.clone()))).0 + ); + + futures::executor::block_on( + txpool.submit_at(&BlockId::number(0), vec![ + extrinsic(0), + extrinsic(1), + Transfer { + amount: Default::default(), + nonce: 2, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_resources_exhausting_tx(), + extrinsic(3), + Transfer { + amount: Default::default(), + nonce: 4, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }.into_resources_exhausting_tx(), + extrinsic(5), + extrinsic(6), + ]) + ).unwrap(); + + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); + let mut propose_block = | + client: &TestClient, + number, + expected_block_extrinsics, + expected_pool_transactions, + | { + let mut proposer = proposer_factory.init_with_now( + &client.header(&BlockId::number(number)).unwrap().unwrap(), + Box::new(move || time::Instant::now()), + ); + + // when + let deadline = time::Duration::from_secs(9); + let block = futures::executor::block_on( + proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) + ).map(|r| r.block).unwrap(); + + // then + // block should have some extrinsics although we have some more in the pool. + assert_eq!(block.extrinsics().len(), expected_block_extrinsics); + assert_eq!(txpool.ready().count(), expected_pool_transactions); + + block + }; + + // let's create one block and import it + let block = propose_block(&client, 0, 2, 7); + client.import(BlockOrigin::Own, block).unwrap(); + + // now let's make sure that we can still make some progress + + // This is most likely incorrect, and caused by #5139 + let tx_remaining = 0; + let block = propose_block(&client, 1, 2, tx_remaining); + client.import(BlockOrigin::Own, block).unwrap(); + } } + diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index e9087c89e07ed373ec2cb932b37b2b6822f570e3..5ec0bc6f9a520c5d8292ce21cc219b5a8a658bab 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -28,10 +28,7 @@ //! # let client = Arc::new(substrate_test_runtime_client::new()); //! # let txpool = Arc::new(BasicPool::new(Default::default(), Arc::new(FullChainApi::new(client.clone()))).0); //! // The first step is to create a `ProposerFactory`. -//! let mut proposer_factory = ProposerFactory { -//! client: client.clone(), -//! transaction_pool: txpool.clone(), -//! }; +//! let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); //! //! // From this factory, we create a `Proposer`. //! 
let proposer = proposer_factory.init( diff --git a/client/block-builder/Cargo.toml b/client/block-builder/Cargo.toml index 383a931b2f8998288a362713a6a24aacaeff70c9..745669c033e3870d27cb4b349bb2aee05187c9cd 100644 --- a/client/block-builder/Cargo.toml +++ b/client/block-builder/Cargo.toml @@ -1,17 +1,21 @@ [package] name = "sc-block-builder" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate block builder" + [dependencies] -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-consensus = { version = "0.8.0", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-block-builder = { version = "2.0.0", path = "../../primitives/block-builder" } -sc-client-api = { version = "2.0.0", path = "../api" } -codec = { package = "parity-scale-codec", version = "1.0.6", features = ["derive"] } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-block-builder = { version = "2.0.0-alpha.2", path = "../../primitives/block-builder" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 26bc9ecea8deac6ecd5f3988878555183cc26aa7..2666fd9cd713999f863729b50d9afbdcdb5ec9f6 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -28,9 +28,7 @@ use codec::Encode; use sp_runtime::{ generic::BlockId, - traits::{ - Header as HeaderT, Hash, Block as BlockT, HashFor, DigestFor, NumberFor, One, HasherFor, - }, + traits::{Header as HeaderT, Hash, Block as BlockT, HashFor, DigestFor, NumberFor, One}, }; use sp_blockchain::{ApplyExtrinsicFailed, Error}; use sp_core::ExecutionContext; @@ -47,7 +45,7 @@ use sc_client_api::backend; /// backend to get the state of the block. Furthermore an optional `proof` is included which /// can be used to proof that the build block contains the expected data. The `proof` will /// only be set when proof recording was activated. -pub struct BuiltBlock>> { +pub struct BuiltBlock>> { /// The actual block that was build. pub block: Block, /// The changes that need to be applied to the backend to get the state of the build block. @@ -56,13 +54,34 @@ pub struct BuiltBlock, } -impl>> BuiltBlock { +impl>> BuiltBlock { /// Convert into the inner values. pub fn into_inner(self) -> (Block, StorageChanges, Option) { (self.block, self.storage_changes, self.proof) } } +/// Block builder provider +pub trait BlockBuilderProvider + where + Block: BlockT, + B: backend::Backend, + Self: Sized, + RA: ProvideRuntimeApi, +{ + /// Create a new block, built on top of `parent`. 
+ /// + /// When proof recording is enabled, all accessed trie nodes are saved. + /// These recorded trie nodes can be used by a third party to proof the + /// output of this block builder without having access to the full storage. + fn new_block_at>( + &self, + parent: &BlockId, + inherent_digests: DigestFor, + record_proof: R, + ) -> sp_blockchain::Result>; +} + /// Utility for building new (valid) blocks from a stream of extrinsics. pub struct BlockBuilder<'a, Block: BlockT, A: ProvideRuntimeApi, B> { extrinsics: Vec, @@ -121,7 +140,7 @@ where backend, }) } - + /// Push onto the block's list of extrinsics. /// /// This will ensure the extrinsic can be validly executed (by executing it). @@ -131,8 +150,7 @@ where /// Push onto the block's list of extrinsics. /// - /// This will treat incoming extrinsic `xt` as untrusted and perform additional checks - /// (currenty checking signature). + /// This will treat incoming extrinsic `xt` as trusted and skip signature check (for signed transactions). pub fn push_trusted(&mut self, xt: ::Extrinsic) -> Result<(), ApiErrorFor> { self.push_internal(xt, true) } @@ -141,60 +159,36 @@ where let block_id = &self.block_id; let extrinsics = &mut self.extrinsics; - if self + let use_trusted = skip_signature && self .api .has_api_with::>, _>( block_id, - |version| version < 4, - )? - { - // Run compatibility fallback for v3. - self.api.map_api_result(|api| { - #[allow(deprecated)] - match api.apply_extrinsic_before_version_4_with_context( + |version| version >= 5, + )?; + + self.api.map_api_result(|api| { + let apply_result = if use_trusted { + api.apply_trusted_extrinsic_with_context( block_id, ExecutionContext::BlockConstruction, xt.clone(), - )? { - Ok(_) => { - extrinsics.push(xt); - Ok(()) - } - Err(e) => Err(ApplyExtrinsicFailed::from(e).into()), - } - }) - } else { - let use_trusted = skip_signature && self - .api - .has_api_with::>, _>( + )? + } else { + api.apply_extrinsic_with_context( block_id, - |version| version >= 5, - )?; - - self.api.map_api_result(|api| { - let apply_result = if use_trusted { - api.apply_trusted_extrinsic_with_context( - block_id, - ExecutionContext::BlockConstruction, - xt.clone(), - )? - } else { - api.apply_extrinsic_with_context( - block_id, - ExecutionContext::BlockConstruction, - xt.clone(), - )? - }; - - match apply_result { - Ok(_) => { - extrinsics.push(xt); - Ok(()) - } - Err(tx_validity) => Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), + ExecutionContext::BlockConstruction, + xt.clone(), + )? + }; + + match apply_result { + Ok(_) => { + extrinsics.push(xt); + Ok(()) } - }) - } + Err(tx_validity) => Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), + } + }) } /// Consume the builder to build a valid `Block` containing all pushed extrinsics. 
@@ -230,17 +224,11 @@ where &state, changes_trie_state.as_ref(), parent_hash, - ); - - // We need to destroy the state, before we check if `storage_changes` is `Ok(_)` - { - let _lock = self.backend.get_import_lock().read(); - self.backend.destroy_state(state)?; - } + )?; Ok(BuiltBlock { block: ::new(header, self.extrinsics), - storage_changes: storage_changes?, + storage_changes, proof, }) } diff --git a/client/chain-spec/Cargo.toml b/client/chain-spec/Cargo.toml index 222914145e160cbc71e9e7007a7e1f70c01d1380..10d4fc2622480c70dbbd938e3210b1421c3e0f84 100644 --- a/client/chain-spec/Cargo.toml +++ b/client/chain-spec/Cargo.toml @@ -1,16 +1,19 @@ [package] name = "sc-chain-spec" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate chain configurations." [dependencies] -sc-chain-spec-derive = { version = "2.0.0", path = "./derive" } +sc-chain-spec-derive = { version = "2.0.0-alpha.2", path = "./derive" } impl-trait-for-tuples = "0.1.3" -sc-network = { version = "0.8", path = "../network" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } diff --git a/client/chain-spec/derive/Cargo.toml b/client/chain-spec/derive/Cargo.toml index 566948883bccb18890de62e54ae3a3547737357c..df0e2f92b2e94a2bfbf47d5fd63214925919c14d 100644 --- a/client/chain-spec/derive/Cargo.toml +++ b/client/chain-spec/derive/Cargo.toml @@ -1,9 +1,12 @@ [package] name = "sc-chain-spec-derive" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Macros to derive chain spec extension traits implementation." [lib] proc-macro = true @@ -11,7 +14,7 @@ proc-macro = true [dependencies] proc-macro-crate = "0.1.4" proc-macro2 = "1.0.6" -quote = "1.0.2" +quote = "1.0.3" syn = "1.0.7" [dev-dependencies] diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index ab9c851bdde5fb862188b871bbf35b063db4188e..a7e5738fc4bd8883b70697640f09111e5910901e 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -117,8 +117,8 @@ struct ChildRawStorage { #[serde(deny_unknown_fields)] /// Storage content for genesis block. 
struct RawGenesis { - pub top: GenesisStorage, - pub children: HashMap, + top: GenesisStorage, + children: HashMap, } #[derive(Serialize, Deserialize)] @@ -134,14 +134,14 @@ enum Genesis { #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] struct ClientSpec { - pub name: String, - pub id: String, - pub boot_nodes: Vec, - pub telemetry_endpoints: Option, - pub protocol_id: Option, - pub properties: Option, + name: String, + id: String, + boot_nodes: Vec, + telemetry_endpoints: Option, + protocol_id: Option, + properties: Option, #[serde(flatten)] - pub extensions: E, + extensions: E, // Never used, left only for backward compatibility. consensus_engine: (), #[serde(skip_serializing)] diff --git a/client/cli/Cargo.toml b/client/cli/Cargo.toml index e176894d64c078dff802e49875c6a5321b1c6a18..588aa134ab172e3ce8988e43bac5b9a72049b5ff 100644 --- a/client/cli/Cargo.toml +++ b/client/cli/Cargo.toml @@ -1,10 +1,12 @@ [package] name = "sc-cli" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Substrate CLI interface." edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] clap = "2.33.0" @@ -21,20 +23,21 @@ tokio = { version = "0.2.9", features = [ "signal", "rt-core", "rt-threaded" ] } futures = "0.3.1" fdlimit = "0.1.1" serde_json = "1.0.41" -sc-informant = { version = "0.8", path = "../informant" } -sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-network = { version = "0.8", path = "../network" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sc-service = { version = "0.8", default-features = false, path = "../service" } -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } +sc-informant = { version = "0.8.0-alpha.2", path = "../informant" } +sp-panic-handler = { version = "2.0.0-alpha.2", path = "../../primitives/panic-handler" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../service" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } +substrate-prometheus-endpoint = { path = "../../utils/prometheus" , version = "0.8.0-alpha.2"} +sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } names = "0.11.0" structopt = "0.3.8" -sc-tracing = { version = "2.0.0", path = "../tracing" } +sc-tracing = { version = "2.0.0-alpha.2", path = "../tracing" } chrono = "0.4.10" parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } diff --git a/client/cli/src/execution_strategy.rs b/client/cli/src/arg_enums.rs similarity index 51% rename from client/cli/src/execution_strategy.rs rename to 
client/cli/src/arg_enums.rs index 888d7b6c4a09680103c15b44e27b67b700f6505c..384087bec0dbe4538c7227e36a6dbce7d789586b 100644 --- a/client/cli/src/execution_strategy.rs +++ b/client/cli/src/arg_enums.rs @@ -14,10 +14,74 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +// NOTE: we allow missing docs here because arg_enum! creates the function variants without doc #![allow(missing_docs)] use structopt::clap::arg_enum; +arg_enum! { + /// How to execute Wasm runtime code + #[allow(missing_docs)] + #[derive(Debug, Clone, Copy)] + pub enum WasmExecutionMethod { + // Uses an interpreter. + Interpreted, + // Uses a compiled runtime. + Compiled, + } +} + +impl WasmExecutionMethod { + /// Returns list of variants that are not disabled by feature flags. + pub fn enabled_variants() -> Vec<&'static str> { + Self::variants() + .iter() + .cloned() + .filter(|&name| cfg!(feature = "wasmtime") || name != "Compiled") + .collect() + } +} + +impl Into for WasmExecutionMethod { + fn into(self) -> sc_service::config::WasmExecutionMethod { + match self { + WasmExecutionMethod::Interpreted => sc_service::config::WasmExecutionMethod::Interpreted, + #[cfg(feature = "wasmtime")] + WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled, + #[cfg(not(feature = "wasmtime"))] + WasmExecutionMethod::Compiled => panic!( + "Substrate must be compiled with \"wasmtime\" feature for compiled Wasm execution" + ), + } + } +} + +arg_enum! { + #[allow(missing_docs)] + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub enum TracingReceiver { + Log, + Telemetry, + } +} + +impl Into for TracingReceiver { + fn into(self) -> sc_tracing::TracingReceiver { + match self { + TracingReceiver::Log => sc_tracing::TracingReceiver::Log, + TracingReceiver::Telemetry => sc_tracing::TracingReceiver::Telemetry, + } + } +} + +arg_enum! { + #[allow(missing_docs)] + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub enum NodeKeyType { + Ed25519 + } +} + arg_enum! { /// How to execute blocks #[derive(Debug, Clone, Copy, PartialEq, Eq)] @@ -33,6 +97,17 @@ arg_enum! { } } +impl Into for ExecutionStrategy { + fn into(self) -> sc_client_api::ExecutionStrategy { + match self { + ExecutionStrategy::Native => sc_client_api::ExecutionStrategy::NativeWhenPossible, + ExecutionStrategy::Wasm => sc_client_api::ExecutionStrategy::AlwaysWasm, + ExecutionStrategy::Both => sc_client_api::ExecutionStrategy::Both, + ExecutionStrategy::NativeElseWasm => sc_client_api::ExecutionStrategy::NativeElseWasm, + } + } +} + impl ExecutionStrategy { /// Returns the variant as `'&static str`. pub fn as_str(&self) -> &'static str { diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs new file mode 100644 index 0000000000000000000000000000000000000000..9b71207efabf8de381d7066b284356e6c2663c11 --- /dev/null +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -0,0 +1,104 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use structopt::StructOpt; +use log::info; +use sc_network::config::build_multiaddr; +use sc_service::{Configuration, ChainSpecExtension, RuntimeGenesis, ChainSpec}; + +use crate::error; +use crate::VersionInfo; +use crate::params::SharedParams; +use crate::params::NodeKeyParams; + +/// The `build-spec` command used to build a specification. +#[derive(Debug, StructOpt, Clone)] +pub struct BuildSpecCmd { + /// Force raw genesis storage output. + #[structopt(long = "raw")] + pub raw: bool, + + /// Disable adding the default bootnode to the specification. + /// + /// By default the `/ip4/127.0.0.1/tcp/30333/p2p/NODE_PEER_ID` bootnode is added to the + /// specification when no bootnode exists. + #[structopt(long = "disable-default-bootnode")] + pub disable_default_bootnode: bool, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub node_key_params: NodeKeyParams, +} + +impl BuildSpecCmd { + /// Run the build-spec command + pub fn run( + self, + config: Configuration, + ) -> error::Result<()> + where + G: RuntimeGenesis, + E: ChainSpecExtension, + { + info!("Building chain spec"); + let mut spec = config.expect_chain_spec().clone(); + let raw_output = self.raw; + + if spec.boot_nodes().is_empty() && !self.disable_default_bootnode { + let keys = config.network.node_key.into_keypair()?; + let peer_id = keys.public().into_peer_id(); + let addr = build_multiaddr![ + Ip4([127, 0, 0, 1]), + Tcp(30333u16), + P2p(peer_id) + ]; + spec.add_boot_node(addr) + } + + let json = sc_service::chain_ops::build_spec(spec, raw_output)?; + + print!("{}", json); + + Ok(()) + } + + /// Update and prepare a `Configuration` with command line parameters + pub fn update_config( + &self, + mut config: &mut Configuration, + spec_factory: F, + version: &VersionInfo, + ) -> error::Result<()> where + G: RuntimeGenesis, + E: ChainSpecExtension, + F: FnOnce(&str) -> Result>, String>, + { + self.shared_params.update_config(&mut config, spec_factory, version)?; + + let net_config_path = config + .in_chain_config_dir(crate::commands::DEFAULT_NETWORK_CONFIG_PATH) + .expect("We provided a base_path"); + + self.node_key_params.update_config(&mut config, Some(&net_config_path))?; + + Ok(()) + } +} + diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs new file mode 100644 index 0000000000000000000000000000000000000000..1036be16de4ebcde466710781f4cfecb283216eb --- /dev/null +++ b/client/cli/src/commands/check_block_cmd.rs @@ -0,0 +1,105 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
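A minimal std-only sketch (not part of the patch) of the default bootnode address that `BuildSpecCmd` above injects when the spec has no bootnodes and `--disable-default-bootnode` is not passed; the real code builds it with sc_network's `build_multiaddr!` macro and the peer id derived from the configured node key:

// Illustrative only: the /ip4/127.0.0.1/tcp/30333/p2p/<peer id> shape described
// in the BuildSpecCmd docs above, reconstructed with plain string formatting.
fn default_bootnode_addr(peer_id: &str) -> String {
    format!("/ip4/127.0.0.1/tcp/30333/p2p/{}", peer_id)
}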
+ +use std::fmt::Debug; +use std::str::FromStr; +use structopt::StructOpt; +use sc_service::{ + Configuration, ChainSpecExtension, RuntimeGenesis, ServiceBuilderCommand, Roles, ChainSpec, +}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::generic::BlockId; + +use crate::error; +use crate::VersionInfo; +use crate::runtime::run_until_exit; +use crate::params::SharedParams; +use crate::params::ImportParams; + +/// The `check-block` command used to validate blocks. +#[derive(Debug, StructOpt, Clone)] +pub struct CheckBlockCmd { + /// Block hash or number + #[structopt(value_name = "HASH or NUMBER")] + pub input: String, + + /// The default number of 64KB pages to ever allocate for Wasm execution. + /// + /// Don't alter this unless you know what you're doing. + #[structopt(long = "default-heap-pages", value_name = "COUNT")] + pub default_heap_pages: Option, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub import_params: ImportParams, +} + +impl CheckBlockCmd { + /// Run the check-block command + pub fn run( + self, + config: Configuration, + builder: B, + ) -> error::Result<()> + where + B: FnOnce(Configuration) -> Result, + G: RuntimeGenesis, + E: ChainSpecExtension, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + { + let input = if self.input.starts_with("0x") { &self.input[2..] } else { &self.input[..] }; + let block_id = match FromStr::from_str(input) { + Ok(hash) => BlockId::hash(hash), + Err(_) => match self.input.parse::() { + Ok(n) => BlockId::number((n as u32).into()), + Err(_) => return Err(error::Error::Input("Invalid hash or number specified".into())), + } + }; + + let start = std::time::Instant::now(); + run_until_exit(config, |config| { + Ok(builder(config)?.check_block(block_id)) + })?; + println!("Completed in {} ms.", start.elapsed().as_millis()); + + Ok(()) + } + + /// Update and prepare a `Configuration` with command line parameters + pub fn update_config( + &self, + mut config: &mut Configuration, + spec_factory: F, + version: &VersionInfo, + ) -> error::Result<()> where + G: RuntimeGenesis, + E: ChainSpecExtension, + F: FnOnce(&str) -> Result>, String>, + { + self.shared_params.update_config(&mut config, spec_factory, version)?; + self.import_params.update_config(&mut config, Roles::FULL, self.shared_params.dev)?; + config.use_in_memory_keystore()?; + + Ok(()) + } +} diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs new file mode 100644 index 0000000000000000000000000000000000000000..21195cccd46648aa7c1efabe332e596168b9c098 --- /dev/null +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -0,0 +1,117 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
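`CheckBlockCmd::run` above accepts either a block hash (optionally `0x`-prefixed) or a block number. A self-contained sketch of that parsing step, using `u64` numbers and a plain hex string in place of the runtime's `Block::Hash` type (illustrative only, not part of the patch):

enum BlockIdSketch {
    Hash(String),
    Number(u64),
}

fn parse_block_id(input: &str) -> Result<BlockIdSketch, String> {
    // Strip an optional 0x prefix, as CheckBlockCmd does above.
    let hex = if input.starts_with("0x") { &input[2..] } else { input };
    // Treat a 64-character hex string as a 32-byte block hash ...
    if hex.len() == 64 && hex.chars().all(|c| c.is_ascii_hexdigit()) {
        Ok(BlockIdSketch::Hash(hex.to_owned()))
    } else {
        // ... otherwise fall back to parsing a block number.
        input
            .parse::<u64>()
            .map(BlockIdSketch::Number)
            .map_err(|_| "Invalid hash or number specified".to_owned())
    }
}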
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::io; +use std::fs; +use std::path::PathBuf; +use std::fmt::Debug; +use log::info; +use structopt::StructOpt; +use sc_service::{ + Configuration, ChainSpecExtension, RuntimeGenesis, ServiceBuilderCommand, ChainSpec, + config::DatabaseConfig, Roles, +}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; + +use crate::error; +use crate::VersionInfo; +use crate::runtime::run_until_exit; +use crate::params::{SharedParams, BlockNumber, PruningParams}; + +/// The `export-blocks` command used to export blocks. +#[derive(Debug, StructOpt, Clone)] +pub struct ExportBlocksCmd { + /// Output file name or stdout if unspecified. + #[structopt(parse(from_os_str))] + pub output: Option, + + /// Specify starting block number. + /// + /// Default is 1. + #[structopt(long = "from", value_name = "BLOCK")] + pub from: Option, + + /// Specify last block number. + /// + /// Default is best block. + #[structopt(long = "to", value_name = "BLOCK")] + pub to: Option, + + /// Use binary output rather than JSON. + #[structopt(long = "binary", value_name = "BOOL", parse(try_from_str), default_value("false"))] + pub binary: bool, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub pruning_params: PruningParams, +} + +impl ExportBlocksCmd { + /// Run the export-blocks command + pub fn run( + self, + config: Configuration, + builder: B, + ) -> error::Result<()> + where + B: FnOnce(Configuration) -> Result, + G: RuntimeGenesis, + E: ChainSpecExtension, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + { + if let DatabaseConfig::Path { ref path, .. } = config.expect_database() { + info!("DB path: {}", path.display()); + } + let from = self.from.as_ref().and_then(|f| f.parse().ok()).unwrap_or(1); + let to = self.to.as_ref().and_then(|t| t.parse().ok()); + + let binary = self.binary; + + let file: Box = match &self.output { + Some(filename) => Box::new(fs::File::create(filename)?), + None => Box::new(io::stdout()), + }; + + run_until_exit(config, |config| { + Ok(builder(config)?.export_blocks(file, from.into(), to, binary)) + }) + } + + /// Update and prepare a `Configuration` with command line parameters + pub fn update_config( + &self, + mut config: &mut Configuration, + spec_factory: F, + version: &VersionInfo, + ) -> error::Result<()> where + G: RuntimeGenesis, + E: ChainSpecExtension, + F: FnOnce(&str) -> Result>, String>, + { + self.shared_params.update_config(&mut config, spec_factory, version)?; + self.pruning_params.update_config(&mut config, Roles::FULL, true)?; + config.use_in_memory_keystore()?; + + Ok(()) + } +} diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs new file mode 100644 index 0000000000000000000000000000000000000000..60a57ab78d1b1b3563d4f3a047f3be70152820eb --- /dev/null +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -0,0 +1,107 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
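`ExportBlocksCmd` above writes to a file when an output path is given and to stdout otherwise. The writer selection in isolation, using only the standard library (names are illustrative, not part of the patch):

use std::{fs, io, path::PathBuf};

// Illustrative only: mirrors the `Some(filename) => File, None => stdout`
// choice made in ExportBlocksCmd::run above.
fn open_export_output(output: Option<&PathBuf>) -> io::Result<Box<dyn io::Write>> {
    Ok(match output {
        Some(path) => Box::new(fs::File::create(path)?),
        None => Box::new(io::stdout()),
    })
}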
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::fmt::Debug; +use std::io::{Read, Seek, self}; +use std::fs; +use std::path::PathBuf; +use structopt::StructOpt; +use sc_service::{ + Configuration, ChainSpecExtension, RuntimeGenesis, ServiceBuilderCommand, ChainSpec, Roles, +}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; + +use crate::error; +use crate::VersionInfo; +use crate::runtime::run_until_exit; +use crate::params::SharedParams; +use crate::params::ImportParams; + +/// The `import-blocks` command used to import blocks. +#[derive(Debug, StructOpt, Clone)] +pub struct ImportBlocksCmd { + /// Input file or stdin if unspecified. + #[structopt(parse(from_os_str))] + pub input: Option, + + /// The default number of 64KB pages to ever allocate for Wasm execution. + /// + /// Don't alter this unless you know what you're doing. + #[structopt(long = "default-heap-pages", value_name = "COUNT")] + pub default_heap_pages: Option, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub import_params: ImportParams, +} + +/// Internal trait used to cast to a dynamic type that implements Read and Seek. +trait ReadPlusSeek: Read + Seek {} + +impl ReadPlusSeek for T {} + +impl ImportBlocksCmd { + /// Run the import-blocks command + pub fn run( + self, + config: Configuration, + builder: B, + ) -> error::Result<()> + where + B: FnOnce(Configuration) -> Result, + G: RuntimeGenesis, + E: ChainSpecExtension, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + { + let file: Box = match &self.input { + Some(filename) => Box::new(fs::File::open(filename)?), + None => { + let mut buffer = Vec::new(); + io::stdin().read_to_end(&mut buffer)?; + Box::new(io::Cursor::new(buffer)) + }, + }; + + run_until_exit(config, |config| { + Ok(builder(config)?.import_blocks(file, false)) + }) + } + + /// Update and prepare a `Configuration` with command line parameters + pub fn update_config( + &self, + mut config: &mut Configuration, + spec_factory: F, + version: &VersionInfo, + ) -> error::Result<()> where + G: RuntimeGenesis, + E: ChainSpecExtension, + F: FnOnce(&str) -> Result>, String>, + { + self.shared_params.update_config(&mut config, spec_factory, version)?; + self.import_params.update_config(&mut config, Roles::FULL, self.shared_params.dev)?; + config.use_in_memory_keystore()?; + + Ok(()) + } +} diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..e9f991c7458837a97926e7b47520ba331bacc93d --- /dev/null +++ b/client/cli/src/commands/mod.rs @@ -0,0 +1,145 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
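`ImportBlocksCmd` above needs a `Read + Seek` source, but stdin is not seekable, so it buffers all of stdin into memory and wraps it in a `Cursor` behind the `ReadPlusSeek` helper trait. The same trick as a standalone sketch (not part of the patch):

use std::io::{self, Cursor, Read, Seek};

// Helper trait so a file and buffered stdin can share one boxed type,
// mirroring the ReadPlusSeek trait introduced above.
trait ReadSeekSketch: Read + Seek {}
impl<T: Read + Seek> ReadSeekSketch for T {}

// Illustrative only: read all of stdin into memory so it can be seeked.
fn seekable_stdin() -> io::Result<Box<dyn ReadSeekSketch>> {
    let mut buffer = Vec::new();
    io::stdin().read_to_end(&mut buffer)?;
    Ok(Box::new(Cursor::new(buffer)))
}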
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +mod runcmd; +mod export_blocks_cmd; +mod build_spec_cmd; +mod import_blocks_cmd; +mod check_block_cmd; +mod revert_cmd; +mod purge_chain_cmd; + +use std::fmt::Debug; +use structopt::StructOpt; + +use sc_service::{ + Configuration, ChainSpecExtension, RuntimeGenesis, ServiceBuilderCommand, ChainSpec, +}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; + +use crate::error; +use crate::VersionInfo; +use crate::params::SharedParams; + +pub use crate::commands::runcmd::RunCmd; +pub use crate::commands::export_blocks_cmd::ExportBlocksCmd; +pub use crate::commands::build_spec_cmd::BuildSpecCmd; +pub use crate::commands::import_blocks_cmd::ImportBlocksCmd; +pub use crate::commands::check_block_cmd::CheckBlockCmd; +pub use crate::commands::revert_cmd::RevertCmd; +pub use crate::commands::purge_chain_cmd::PurgeChainCmd; + +/// default sub directory to store network config +const DEFAULT_NETWORK_CONFIG_PATH : &'static str = "network"; + +/// All core commands that are provided by default. +/// +/// The core commands are split into multiple subcommands and `Run` is the default subcommand. From +/// the CLI user perspective, it is not visible that `Run` is a subcommand. So, all parameters of +/// `Run` are exported as main executable parameters. +#[derive(Debug, Clone, StructOpt)] +pub enum Subcommand { + /// Build a spec.json file, outputing to stdout. + BuildSpec(build_spec_cmd::BuildSpecCmd), + + /// Export blocks to a file. + ExportBlocks(export_blocks_cmd::ExportBlocksCmd), + + /// Import blocks from file. + ImportBlocks(import_blocks_cmd::ImportBlocksCmd), + + /// Validate a single block. + CheckBlock(check_block_cmd::CheckBlockCmd), + + /// Revert chain to the previous state. + Revert(revert_cmd::RevertCmd), + + /// Remove the whole chain data. 
+ PurgeChain(purge_chain_cmd::PurgeChainCmd), +} + +impl Subcommand { + /// Get the shared parameters of a `CoreParams` command + pub fn get_shared_params(&self) -> &SharedParams { + use Subcommand::*; + + match self { + BuildSpec(params) => ¶ms.shared_params, + ExportBlocks(params) => ¶ms.shared_params, + ImportBlocks(params) => ¶ms.shared_params, + CheckBlock(params) => ¶ms.shared_params, + Revert(params) => ¶ms.shared_params, + PurgeChain(params) => ¶ms.shared_params, + } + } + + /// Run any `CoreParams` command + pub fn run( + self, + config: Configuration, + builder: B, + ) -> error::Result<()> + where + B: FnOnce(Configuration) -> Result, + G: RuntimeGenesis, + E: ChainSpecExtension, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + { + match self { + Subcommand::BuildSpec(cmd) => cmd.run(config), + Subcommand::ExportBlocks(cmd) => cmd.run(config, builder), + Subcommand::ImportBlocks(cmd) => cmd.run(config, builder), + Subcommand::CheckBlock(cmd) => cmd.run(config, builder), + Subcommand::PurgeChain(cmd) => cmd.run(config), + Subcommand::Revert(cmd) => cmd.run(config, builder), + } + } + + /// Update and prepare a `Configuration` with command line parameters + pub fn update_config( + &self, + mut config: &mut Configuration, + spec_factory: F, + version: &VersionInfo, + ) -> error::Result<()> where + G: RuntimeGenesis, + E: ChainSpecExtension, + F: FnOnce(&str) -> Result>, String>, + { + match self { + Subcommand::BuildSpec(cmd) => cmd.update_config(&mut config, spec_factory, version), + Subcommand::ExportBlocks(cmd) => cmd.update_config(&mut config, spec_factory, version), + Subcommand::ImportBlocks(cmd) => cmd.update_config(&mut config, spec_factory, version), + Subcommand::CheckBlock(cmd) => cmd.update_config(&mut config, spec_factory, version), + Subcommand::PurgeChain(cmd) => cmd.update_config(&mut config, spec_factory, version), + Subcommand::Revert(cmd) => cmd.update_config(&mut config, spec_factory, version), + } + } + + /// Initialize substrate. This must be done only once. + /// + /// This method: + /// + /// 1. Set the panic handler + /// 2. Raise the FD limit + /// 3. Initialize the logger + pub fn init(&self, version: &VersionInfo) -> error::Result<()> { + self.get_shared_params().init(version) + } +} diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs new file mode 100644 index 0000000000000000000000000000000000000000..b7c559e5cc359eec154a250a4c41cb2b8f5bd70d --- /dev/null +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -0,0 +1,106 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
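The `Subcommand` enum above is meant to sit next to `RunCmd` in a downstream node binary, with the run command acting as the implicit default. A sketch of such a wrapper; the `Cli` struct here is hypothetical and not part of this patch:

use structopt::StructOpt;
use sc_cli::{RunCmd, Subcommand};

// Hypothetical downstream CLI: an optional subcommand plus the flattened RunCmd,
// so running the binary with no subcommand behaves like the run command.
#[derive(Debug, StructOpt)]
struct Cli {
    #[structopt(subcommand)]
    subcommand: Option<Subcommand>,

    #[structopt(flatten)]
    run: RunCmd,
}

Dispatch then follows the shape of `Subcommand::run` above: call `init`, then `update_config`, then `run` on whichever command was parsed.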
+ +use std::fmt::Debug; +use std::io::{Write, self}; +use std::fs; +use structopt::StructOpt; +use sc_service::{ + Configuration, ChainSpecExtension, RuntimeGenesis, ChainSpec, + config::{DatabaseConfig}, +}; + +use crate::error; +use crate::VersionInfo; +use crate::params::SharedParams; + +/// The `purge-chain` command used to remove the whole chain. +#[derive(Debug, StructOpt, Clone)] +pub struct PurgeChainCmd { + /// Skip interactive prompt by answering yes automatically. + #[structopt(short = "y")] + pub yes: bool, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, +} + +impl PurgeChainCmd { + /// Run the purge command + pub fn run( + self, + config: Configuration, + ) -> error::Result<()> + where + G: RuntimeGenesis, + E: ChainSpecExtension, + { + let db_path = match config.expect_database() { + DatabaseConfig::Path { path, .. } => path, + _ => { + eprintln!("Cannot purge custom database implementation"); + return Ok(()); + } + }; + + if !self.yes { + print!("Are you sure to remove {:?}? [y/N]: ", &db_path); + io::stdout().flush().expect("failed to flush stdout"); + + let mut input = String::new(); + io::stdin().read_line(&mut input)?; + let input = input.trim(); + + match input.chars().nth(0) { + Some('y') | Some('Y') => {}, + _ => { + println!("Aborted"); + return Ok(()); + }, + } + } + + match fs::remove_dir_all(&db_path) { + Ok(_) => { + println!("{:?} removed.", &db_path); + Ok(()) + }, + Err(ref err) if err.kind() == io::ErrorKind::NotFound => { + eprintln!("{:?} did not exist.", &db_path); + Ok(()) + }, + Err(err) => Result::Err(err.into()) + } + } + + /// Update and prepare a `Configuration` with command line parameters + pub fn update_config( + &self, + mut config: &mut Configuration, + spec_factory: F, + version: &VersionInfo, + ) -> error::Result<()> where + G: RuntimeGenesis, + E: ChainSpecExtension, + F: FnOnce(&str) -> Result>, String>, + { + self.shared_params.update_config(&mut config, spec_factory, version)?; + config.use_in_memory_keystore()?; + + Ok(()) + } +} diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs new file mode 100644 index 0000000000000000000000000000000000000000..f0c534898effd33ac33b979c322a98d78b3877cd --- /dev/null +++ b/client/cli/src/commands/revert_cmd.rs @@ -0,0 +1,83 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::fmt::Debug; +use structopt::StructOpt; +use sc_service::{ + Configuration, ChainSpecExtension, RuntimeGenesis, ServiceBuilderCommand, ChainSpec, Roles, +}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; + +use crate::error; +use crate::VersionInfo; +use crate::params::{BlockNumber, SharedParams, PruningParams}; + +/// The `revert` command used revert the chain to a previous state. +#[derive(Debug, StructOpt, Clone)] +pub struct RevertCmd { + /// Number of blocks to revert. 
+ #[structopt(default_value = "256")] + pub num: BlockNumber, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub pruning_params: PruningParams, +} + +impl RevertCmd { + /// Run the revert command + pub fn run( + self, + config: Configuration, + builder: B, + ) -> error::Result<()> + where + B: FnOnce(Configuration) -> Result, + G: RuntimeGenesis, + E: ChainSpecExtension, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + { + let blocks = self.num.parse()?; + builder(config)?.revert_chain(blocks)?; + + Ok(()) + } + + /// Update and prepare a `Configuration` with command line parameters + pub fn update_config( + &self, + mut config: &mut Configuration, + spec_factory: F, + version: &VersionInfo, + ) -> error::Result<()> where + G: RuntimeGenesis, + E: ChainSpecExtension, + F: FnOnce(&str) -> Result>, String>, + { + self.shared_params.update_config(&mut config, spec_factory, version)?; + self.pruning_params.update_config(&mut config, Roles::FULL, true)?; + config.use_in_memory_keystore()?; + + Ok(()) + } +} diff --git a/client/cli/src/commands/runcmd.rs b/client/cli/src/commands/runcmd.rs new file mode 100644 index 0000000000000000000000000000000000000000..2a070b27b8b8518f7037271ca30230c23605b160 --- /dev/null +++ b/client/cli/src/commands/runcmd.rs @@ -0,0 +1,736 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::path::PathBuf; +use std::net::SocketAddr; +use std::fs; +use log::info; +use structopt::{StructOpt, clap::arg_enum}; +use names::{Generator, Name}; +use regex::Regex; +use chrono::prelude::*; +use sc_service::{ + AbstractService, Configuration, ChainSpecExtension, RuntimeGenesis, ChainSpec, Roles, + config::{KeystoreConfig, PrometheusConfig}, +}; +use sc_telemetry::TelemetryEndpoints; + +use crate::VersionInfo; +use crate::error; +use crate::params::ImportParams; +use crate::params::SharedParams; +use crate::params::NetworkConfigurationParams; +use crate::params::TransactionPoolParams; +use crate::runtime::run_service_until_exit; + +/// The maximum number of characters for a node name. +const NODE_NAME_MAX_LENGTH: usize = 32; + +/// default sub directory for the key store +const DEFAULT_KEYSTORE_CONFIG_PATH : &'static str = "keystore"; + +arg_enum! { + /// Whether off-chain workers are enabled. + #[allow(missing_docs)] + #[derive(Debug, Clone)] + pub enum OffchainWorkerEnabled { + Always, + Never, + WhenValidating, + } +} + +/// The `run` command used to run a node. +#[derive(Debug, StructOpt, Clone)] +pub struct RunCmd { + /// Enable validator mode. + /// + /// The node will be started with the authority role and actively + /// participate in any consensus task that it can (e.g. 
depending on + /// availability of local keys). + #[structopt( + long = "validator", + conflicts_with_all = &[ "sentry" ] + )] + pub validator: bool, + + /// Enable sentry mode. + /// + /// The node will be started with the authority role and participate in + /// consensus tasks as an "observer", it will never actively participate + /// regardless of whether it could (e.g. keys are available locally). This + /// mode is useful as a secure proxy for validators (which would run + /// detached from the network), since we want this node to participate in + /// the full consensus protocols in order to have all needed consensus data + /// available to relay to private nodes. + #[structopt( + long = "sentry", + conflicts_with_all = &[ "validator", "light" ] + )] + pub sentry: bool, + + /// Disable GRANDPA voter when running in validator mode, otherwise disables the GRANDPA observer. + #[structopt(long = "no-grandpa")] + pub no_grandpa: bool, + + /// Experimental: Run in light client mode. + #[structopt(long = "light", conflicts_with = "sentry")] + pub light: bool, + + /// Listen to all RPC interfaces. + /// + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use a RPC proxy + /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. + #[structopt(long = "rpc-external")] + pub rpc_external: bool, + + /// Listen to all RPC interfaces. + /// + /// Same as `--rpc-external`. + #[structopt(long = "unsafe-rpc-external")] + pub unsafe_rpc_external: bool, + + /// Listen to all Websocket interfaces. + /// + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use a RPC proxy + /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. + #[structopt(long = "ws-external")] + pub ws_external: bool, + + /// Listen to all Websocket interfaces. + /// + /// Same as `--ws-external` but doesn't warn you about it. + #[structopt(long = "unsafe-ws-external")] + pub unsafe_ws_external: bool, + + /// Listen to all Prometheus data source interfaces. + /// + /// Default is local. + #[structopt(long = "prometheus-external")] + pub prometheus_external: bool, + + /// Specify HTTP RPC server TCP port. + #[structopt(long = "rpc-port", value_name = "PORT")] + pub rpc_port: Option, + + /// Specify WebSockets RPC server TCP port. + #[structopt(long = "ws-port", value_name = "PORT")] + pub ws_port: Option, + + /// Maximum number of WS RPC server connections. + #[structopt(long = "ws-max-connections", value_name = "COUNT")] + pub ws_max_connections: Option, + + /// Specify browser Origins allowed to access the HTTP & WS RPC servers. + /// + /// A comma-separated list of origins (protocol://domain or special `null` + /// value). Value of `all` will disable origin validation. Default is to + /// allow localhost and https://polkadot.js.org origins. When running in + /// --dev mode the default is to allow all origins. + #[structopt(long = "rpc-cors", value_name = "ORIGINS", parse(try_from_str = parse_cors))] + pub rpc_cors: Option, + + /// Specify Prometheus data source server TCP Port. + #[structopt(long = "prometheus-port", value_name = "PORT")] + pub prometheus_port: Option, + + /// Do not expose a Prometheus metric endpoint. + /// + /// Prometheus metric endpoint is enabled by default. 
+ #[structopt(long = "no-prometheus")] + pub no_prometheus: bool, + + /// The human-readable name for this node. + /// + /// The node name will be reported to the telemetry server, if enabled. + #[structopt(long = "name", value_name = "NAME")] + pub name: Option, + + /// Disable connecting to the Substrate telemetry server. + /// + /// Telemetry is on by default on global chains. + #[structopt(long = "no-telemetry")] + pub no_telemetry: bool, + + /// The URL of the telemetry server to connect to. + /// + /// This flag can be passed multiple times as a mean to specify multiple + /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting + /// the least verbosity. If no verbosity level is specified the default is + /// 0. + #[structopt(long = "telemetry-url", value_name = "URL VERBOSITY", parse(try_from_str = parse_telemetry_endpoints))] + pub telemetry_endpoints: Vec<(String, u8)>, + + /// Should execute offchain workers on every block. + /// + /// By default it's only enabled for nodes that are authoring new blocks. + #[structopt( + long = "offchain-worker", + value_name = "ENABLED", + possible_values = &OffchainWorkerEnabled::variants(), + case_insensitive = true, + default_value = "WhenValidating" + )] + pub offchain_worker: OffchainWorkerEnabled, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub import_params: ImportParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_config: NetworkConfigurationParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub pool_config: TransactionPoolParams, + + /// Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. + #[structopt(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] + pub alice: bool, + + /// Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] + pub bob: bool, + + /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] + pub charlie: bool, + + /// Shortcut for `--name Dave --validator` with session keys for `Dave` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] + pub dave: bool, + + /// Shortcut for `--name Eve --validator` with session keys for `Eve` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] + pub eve: bool, + + /// Shortcut for `--name Ferdie --validator` with session keys for `Ferdie` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] + pub ferdie: bool, + + /// Shortcut for `--name One --validator` with session keys for `One` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] + pub one: bool, + + /// Shortcut for `--name Two --validator` with session keys for `Two` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] + pub two: bool, + + /// Enable authoring even when offline. + #[structopt(long = "force-authoring")] + pub force_authoring: bool, + + /// Specify custom keystore path. 
+ #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] + pub keystore_path: Option, + + /// Use interactive shell for entering the password used by the keystore. + #[structopt( + long = "password-interactive", + conflicts_with_all = &[ "password", "password-filename" ] + )] + pub password_interactive: bool, + + /// Password used by the keystore. + #[structopt( + long = "password", + conflicts_with_all = &[ "password-interactive", "password-filename" ] + )] + pub password: Option, + + /// File that contains the password used by the keystore. + #[structopt( + long = "password-filename", + value_name = "PATH", + parse(from_os_str), + conflicts_with_all = &[ "password-interactive", "password" ] + )] + pub password_filename: Option +} + +impl RunCmd { + /// Get the `Sr25519Keyring` matching one of the flag + pub fn get_keyring(&self) -> Option { + use sp_keyring::Sr25519Keyring::*; + + if self.alice { Some(Alice) } + else if self.bob { Some(Bob) } + else if self.charlie { Some(Charlie) } + else if self.dave { Some(Dave) } + else if self.eve { Some(Eve) } + else if self.ferdie { Some(Ferdie) } + else if self.one { Some(One) } + else if self.two { Some(Two) } + else { None } + } + + /// Update and prepare a `Configuration` with command line parameters of `RunCmd` and `VersionInfo` + pub fn update_config( + &self, + mut config: &mut Configuration, + spec_factory: F, + version: &VersionInfo, + ) -> error::Result<()> + where + G: RuntimeGenesis, + E: ChainSpecExtension, + F: FnOnce(&str) -> Result>, String>, + { + self.shared_params.update_config(&mut config, spec_factory, version)?; + + let password = if self.password_interactive { + #[cfg(not(target_os = "unknown"))] + { + Some(input_keystore_password()?.into()) + } + #[cfg(target_os = "unknown")] + None + } else if let Some(ref file) = self.password_filename { + Some(fs::read_to_string(file).map_err(|e| format!("{}", e))?.into()) + } else if let Some(ref password) = self.password { + Some(password.clone().into()) + } else { + None + }; + + let path = self.keystore_path.clone().or( + config.in_chain_config_dir(DEFAULT_KEYSTORE_CONFIG_PATH) + ); + + config.keystore = KeystoreConfig::Path { + path: path.ok_or_else(|| "No `base_path` provided to create keystore path!".to_string())?, + password, + }; + + let keyring = self.get_keyring(); + let is_dev = self.shared_params.dev; + let is_light = self.light; + let is_authority = (self.validator || self.sentry || is_dev || keyring.is_some()) + && !is_light; + let role = + if is_light { + sc_service::Roles::LIGHT + } else if is_authority { + sc_service::Roles::AUTHORITY + } else { + sc_service::Roles::FULL + }; + + self.import_params.update_config(&mut config, role, is_dev)?; + + config.name = match (self.name.as_ref(), keyring) { + (Some(name), _) => name.to_string(), + (_, Some(keyring)) => keyring.to_string(), + (None, None) => generate_node_name(), + }; + if let Err(msg) = is_node_name_valid(&config.name) { + return Err(error::Error::Input( + format!("Invalid node name '{}'. Reason: {}. If unsure, use none.", + config.name, + msg, + ) + )); + } + + // set sentry mode (i.e. 
act as an authority but **never** actively participate) + config.sentry_mode = self.sentry; + + config.offchain_worker = match (&self.offchain_worker, role) { + (OffchainWorkerEnabled::WhenValidating, sc_service::Roles::AUTHORITY) => true, + (OffchainWorkerEnabled::Always, _) => true, + (OffchainWorkerEnabled::Never, _) => false, + (OffchainWorkerEnabled::WhenValidating, _) => false, + }; + + config.roles = role; + config.disable_grandpa = self.no_grandpa; + + let client_id = config.client_id(); + let network_path = config + .in_chain_config_dir(crate::commands::DEFAULT_NETWORK_CONFIG_PATH) + .expect("We provided a basepath"); + self.network_config.update_config( + &mut config, + network_path, + client_id, + is_dev, + )?; + + self.pool_config.update_config(&mut config)?; + + config.dev_key_seed = keyring + .map(|a| format!("//{}", a)).or_else(|| { + if is_dev && !is_light { + Some("//Alice".into()) + } else { + None + } + }); + + if config.rpc_http.is_none() || self.rpc_port.is_some() { + let rpc_interface: &str = interface_str(self.rpc_external, self.unsafe_rpc_external, self.validator)?; + config.rpc_http = Some(parse_address(&format!("{}:{}", rpc_interface, 9933), self.rpc_port)?); + } + if config.rpc_ws.is_none() || self.ws_port.is_some() { + let ws_interface: &str = interface_str(self.ws_external, self.unsafe_ws_external, self.validator)?; + config.rpc_ws = Some(parse_address(&format!("{}:{}", ws_interface, 9944), self.ws_port)?); + } + + config.rpc_ws_max_connections = self.ws_max_connections; + config.rpc_cors = self.rpc_cors.clone().unwrap_or_else(|| if is_dev { + log::warn!("Running in --dev mode, RPC CORS has been disabled."); + Cors::All + } else { + Cors::List(vec![ + "http://localhost:*".into(), + "http://127.0.0.1:*".into(), + "https://localhost:*".into(), + "https://127.0.0.1:*".into(), + "https://polkadot.js.org".into(), + ]) + }).into(); + + // Override telemetry + if self.no_telemetry { + config.telemetry_endpoints = None; + } else if !self.telemetry_endpoints.is_empty() { + config.telemetry_endpoints = Some( + TelemetryEndpoints::new(self.telemetry_endpoints.clone()) + ); + } + + // Override prometheus + if self.no_prometheus { + config.prometheus_config = None; + } else if config.prometheus_config.is_none() { + let prometheus_interface: &str = if self.prometheus_external { "0.0.0.0" } else { "127.0.0.1" }; + config.prometheus_config = Some(PrometheusConfig::new_with_default_registry( + parse_address(&format!("{}:{}", prometheus_interface, 9615), self.prometheus_port)?, + )); + } + + config.tracing_targets = self.import_params.tracing_targets.clone().into(); + config.tracing_receiver = self.import_params.tracing_receiver.clone().into(); + + // Imply forced authoring on --dev + config.force_authoring = self.shared_params.dev || self.force_authoring; + + Ok(()) + } + + /// Run the command that runs the node + pub fn run( + self, + config: Configuration, + new_light: FNL, + new_full: FNF, + version: &VersionInfo, + ) -> error::Result<()> + where + G: RuntimeGenesis, + E: ChainSpecExtension, + FNL: FnOnce(Configuration) -> Result, + FNF: FnOnce(Configuration) -> Result, + SL: AbstractService + Unpin, + SF: AbstractService + Unpin, + { + info!("{}", version.name); + info!(" version {}", config.full_version()); + info!(" by {}, {}-{}", version.author, version.copyright_start_year, Local::today().year()); + info!("Chain specification: {}", config.expect_chain_spec().name()); + info!("Node name: {}", config.name); + info!("Roles: {}", config.display_role()); + + match 
config.roles { + Roles::LIGHT => run_service_until_exit( + config, + new_light, + ), + _ => run_service_until_exit( + config, + new_full, + ), + } + } + + /// Initialize substrate. This must be done only once. + /// + /// This method: + /// + /// 1. Set the panic handler + /// 2. Raise the FD limit + /// 3. Initialize the logger + pub fn init(&self, version: &VersionInfo) -> error::Result<()> { + self.shared_params.init(version) + } +} + +/// Check whether a node name is considered as valid +pub fn is_node_name_valid(_name: &str) -> Result<(), &str> { + let name = _name.to_string(); + if name.chars().count() >= NODE_NAME_MAX_LENGTH { + return Err("Node name too long"); + } + + let invalid_chars = r"[\\.@]"; + let re = Regex::new(invalid_chars).unwrap(); + if re.is_match(&name) { + return Err("Node name should not contain invalid chars such as '.' and '@'"); + } + + let invalid_patterns = r"(https?:\\/+)?(www)+"; + let re = Regex::new(invalid_patterns).unwrap(); + if re.is_match(&name) { + return Err("Node name should not contain urls"); + } + + Ok(()) +} + +#[cfg(not(target_os = "unknown"))] +fn input_keystore_password() -> Result { + rpassword::read_password_from_tty(Some("Keystore password: ")) + .map_err(|e| format!("{:?}", e)) +} + +fn generate_node_name() -> String { + let result = loop { + let node_name = Generator::with_naming(Name::Numbered).next().unwrap(); + let count = node_name.chars().count(); + + if count < NODE_NAME_MAX_LENGTH { + break node_name + } + }; + + result +} + +fn parse_address( + address: &str, + port: Option, +) -> Result { + let mut address: SocketAddr = address.parse().map_err( + |_| format!("Invalid address: {}", address) + )?; + if let Some(port) = port { + address.set_port(port); + } + + Ok(address) +} + +fn interface_str( + is_external: bool, + is_unsafe_external: bool, + is_validator: bool, +) -> Result<&'static str, error::Error> { + if is_external && is_validator { + return Err(error::Error::Input("--rpc-external and --ws-external options shouldn't be \ + used if the node is running as a validator. Use `--unsafe-rpc-external` if you understand \ + the risks. See the options description for more information.".to_owned())); + } + + if is_external || is_unsafe_external { + log::warn!("It isn't safe to expose RPC publicly without a proxy server that filters \ + available set of RPC methods."); + + Ok("0.0.0.0") + } else { + Ok("127.0.0.1") + } +} + +/// Default to verbosity level 0, if none is provided. +fn parse_telemetry_endpoints(s: &str) -> Result<(String, u8), Box> { + let pos = s.find(' '); + match pos { + None => { + Ok((s.to_owned(), 0)) + }, + Some(pos_) => { + let verbosity = s[pos_ + 1..].parse()?; + let url = s[..pos_].parse()?; + Ok((url, verbosity)) + } + } +} + +/// CORS setting +/// +/// The type is introduced to overcome `Option>` +/// handling of `structopt`. +#[derive(Clone, Debug)] +pub enum Cors { + /// All hosts allowed + All, + /// Only hosts on the list are allowed. 
+ List(Vec), +} + +impl From for Option> { + fn from(cors: Cors) -> Self { + match cors { + Cors::All => None, + Cors::List(list) => Some(list), + } + } +} + +/// Parse cors origins +fn parse_cors(s: &str) -> Result> { + let mut is_all = false; + let mut origins = Vec::new(); + for part in s.split(',') { + match part { + "all" | "*" => { + is_all = true; + break; + }, + other => origins.push(other.to_owned()), + } + } + + Ok(if is_all { Cors::All } else { Cors::List(origins) }) +} + +#[cfg(test)] +mod tests { + use super::*; + use sc_service::config::DatabaseConfig; + + const TEST_VERSION_INFO: &'static VersionInfo = &VersionInfo { + name: "node-test", + version: "0.1.0", + commit: "some_commit", + executable_name: "node-test", + description: "description", + author: "author", + support_url: "http://example.org", + copyright_start_year: 2020, + }; + + #[test] + fn tests_node_name_good() { + assert!(is_node_name_valid("short name").is_ok()); + } + + #[test] + fn tests_node_name_bad() { + assert!(is_node_name_valid("long names are not very cool for the ui").is_err()); + assert!(is_node_name_valid("Dots.not.Ok").is_err()); + assert!(is_node_name_valid("http://visit.me").is_err()); + assert!(is_node_name_valid("https://visit.me").is_err()); + assert!(is_node_name_valid("www.visit.me").is_err()); + assert!(is_node_name_valid("email@domain").is_err()); + } + + #[test] + fn keystore_path_is_generated_correctly() { + let chain_spec = ChainSpec::from_genesis( + "test", + "test-id", + || (), + Vec::new(), + None, + None, + None, + None::<()>, + ); + + for keystore_path in vec![None, Some("/keystore/path")] { + let args: Vec<&str> = vec![]; + let mut cli = RunCmd::from_iter(args); + cli.keystore_path = keystore_path.clone().map(PathBuf::from); + + let mut config = Configuration::default(); + config.config_dir = Some(PathBuf::from("/test/path")); + config.chain_spec = Some(chain_spec.clone()); + let chain_spec = chain_spec.clone(); + cli.update_config(&mut config, move |_| Ok(Some(chain_spec)), TEST_VERSION_INFO).unwrap(); + + let expected_path = match keystore_path { + Some(path) => PathBuf::from(path), + None => PathBuf::from("/test/path/chains/test-id/keystore"), + }; + + assert_eq!(expected_path, config.keystore.path().unwrap().to_owned()); + } + } + + #[test] + fn ensure_load_spec_provide_defaults() { + let chain_spec = ChainSpec::from_genesis( + "test", + "test-id", + || (), + vec!["boo".to_string()], + Some(TelemetryEndpoints::new(vec![("foo".to_string(), 42)])), + None, + None, + None::<()>, + ); + + let args: Vec<&str> = vec![]; + let cli = RunCmd::from_iter(args); + + let mut config = Configuration::from_version(TEST_VERSION_INFO); + cli.update_config(&mut config, |_| Ok(Some(chain_spec)), TEST_VERSION_INFO).unwrap(); + + assert!(config.chain_spec.is_some()); + assert!(!config.network.boot_nodes.is_empty()); + assert!(config.telemetry_endpoints.is_some()); + } + + #[test] + fn ensure_update_config_for_running_node_provides_defaults() { + let chain_spec = ChainSpec::from_genesis( + "test", + "test-id", + || (), + vec![], + None, + None, + None, + None::<()>, + ); + + let args: Vec<&str> = vec![]; + let cli = RunCmd::from_iter(args); + + let mut config = Configuration::from_version(TEST_VERSION_INFO); + cli.init(&TEST_VERSION_INFO).unwrap(); + cli.update_config(&mut config, |_| Ok(Some(chain_spec)), TEST_VERSION_INFO).unwrap(); + + assert!(config.config_dir.is_some()); + assert!(config.database.is_some()); + if let Some(DatabaseConfig::Path { ref cache_size, .. 
}) = config.database { + assert!(cache_size.is_some()); + } else { + panic!("invalid config.database variant"); + } + assert!(!config.name.is_empty()); + assert!(config.network.config_path.is_some()); + assert!(!config.network.listen_addresses.is_empty()); + } +} diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index 074cb353c3a0a6d2a8cbd7653a156e1a169de04c..edc1adecc762c5f70c11958d1b6f9b0b93863904 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -49,6 +49,12 @@ impl std::convert::From for Error { } } +impl std::convert::From<&str> for Error { + fn from(s: &str) -> Error { + Error::Input(s.to_string()) + } +} + impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 6259c0a21b3146d3a04ba842896d4c155b55ed0e..e28edebd60def53ba34497d416a9bea7919ef983 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -19,127 +19,33 @@ #![warn(missing_docs)] #![warn(unused_extern_crates)] -#[macro_use] -mod traits; mod params; -mod execution_strategy; -pub mod error; +mod arg_enums; +mod error; mod runtime; -mod node_key; - -use sc_client_api::execution_extensions::ExecutionStrategies; -use sc_service::{ - config::{Configuration, DatabaseConfig, KeystoreConfig}, - ServiceBuilderCommand, - RuntimeGenesis, ChainSpecExtension, PruningMode, ChainSpec, - AbstractService, Roles as ServiceRoles, -}; +mod commands; + pub use sc_service::config::VersionInfo; -use sc_network::{ - self, - multiaddr::Protocol, - config::{ - NetworkConfiguration, TransportConfig, NonReservedPeerMode, - }, -}; - -use std::{ - io::Write, iter, fmt::Debug, fs, - net::{Ipv4Addr, SocketAddr}, path::PathBuf, -}; + +use std::io::Write; use regex::Regex; -use structopt::{StructOpt, clap}; +use structopt::{StructOpt, clap::{self, AppSettings}}; pub use structopt; -use params::{ - NetworkConfigurationParams, TransactionPoolParams, Cors, -}; -pub use params::{ - SharedParams, ImportParams, ExecutionStrategy, Subcommand, RunCmd, BuildSpecCmd, - ExportBlocksCmd, ImportBlocksCmd, CheckBlockCmd, PurgeChainCmd, RevertCmd, - WasmExecutionMethod, -}; -pub use traits::GetSharedParams; -use app_dirs::{AppInfo, AppDataType}; +pub use params::*; +pub use commands::*; +pub use arg_enums::*; +pub use error::*; use log::info; use lazy_static::lazy_static; -use sc_telemetry::TelemetryEndpoints; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; pub use crate::runtime::{run_until_exit, run_service_until_exit}; -use execution_strategy::*; -use names::{Generator, Name}; -use chrono::prelude::*; - -/// default sub directory to store network config -const DEFAULT_NETWORK_CONFIG_PATH : &'static str = "network"; -/// default sub directory to store database -const DEFAULT_DB_CONFIG_PATH : &'static str = "db"; -/// default sub directory for the key store -const DEFAULT_KEYSTORE_CONFIG_PATH : &'static str = "keystore"; - -/// The maximum number of characters for a node name. 
-const NODE_NAME_MAX_LENGTH: usize = 32; - -fn get_chain_key(cli: &SharedParams) -> String { - match cli.chain { - Some(ref chain) => chain.clone(), - None => if cli.dev { "dev".into() } else { "".into() } - } -} - -fn generate_node_name() -> String { - let result = loop { - let node_name = Generator::with_naming(Name::Numbered).next().unwrap(); - let count = node_name.chars().count(); - - if count < NODE_NAME_MAX_LENGTH { - break node_name - } - }; - - result -} - -/// Load spec to `Configuration` from shared params and spec factory. -pub fn load_spec<'a, G, E, F>( - mut config: &'a mut Configuration, - cli: &SharedParams, - factory: F, -) -> error::Result<&'a ChainSpec> where - G: RuntimeGenesis, - E: ChainSpecExtension, - F: FnOnce(&str) -> Result>, String>, -{ - let chain_key = get_chain_key(cli); - let spec = match factory(&chain_key)? { - Some(spec) => spec, - None => ChainSpec::from_json_file(PathBuf::from(chain_key))? - }; - - config.network.boot_nodes = spec.boot_nodes().to_vec(); - config.telemetry_endpoints = spec.telemetry_endpoints().clone(); - - config.chain_spec = Some(spec); - - Ok(config.chain_spec.as_ref().unwrap()) -} - -fn base_path(cli: &SharedParams, version: &VersionInfo) -> PathBuf { - cli.base_path.clone() - .unwrap_or_else(|| - app_dirs::get_app_root( - AppDataType::UserData, - &AppInfo { - name: version.executable_name, - author: version.author - } - ).expect("app directories exist on all supported platforms; qed") - ) -} /// Helper function used to parse the command line arguments. This is the equivalent of -/// `structopt`'s `from_args()` except that it takes a `VersionInfo` argument to provide the name of -/// the application, author, "about" and version. +/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of +/// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. +/// +/// To allow running the node without subcommand, tt also sets a few more settings: +/// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. /// /// Gets the struct from the command line arguments. Print the /// error message and quit the program in case of failure. @@ -152,7 +58,10 @@ where /// Helper function used to parse the command line arguments. This is the equivalent of /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of -/// the application, author, "about" and version. +/// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. +/// +/// To allow running the node without subcommand, tt also sets a few more settings: +/// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. /// /// Gets the struct from any iterator such as a `Vec` of your making. /// Print the error message and quit the program in case of failure. @@ -174,14 +83,22 @@ where .name(version.executable_name) .author(version.author) .about(version.description) - .version(full_version.as_str()); + .version(full_version.as_str()) + .settings(&[ + AppSettings::GlobalVersion, + AppSettings::ArgsNegateSubcommands, + AppSettings::SubcommandsNegateReqs, + ]); T::from_clap(&app.get_matches_from(iter)) } /// Helper function used to parse the command line arguments. This is the equivalent of -/// `structopt`'s `try_from_iter()` except that it takes a `VersionInfo` argument to provide the -/// name of the application, author, "about" and version. 
+/// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of +/// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. +/// +/// To allow running the node without subcommand, tt also sets a few more settings: +/// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. /// /// Gets the struct from any iterator such as a `Vec` of your making. /// Print the error message and quit the program in case of failure. @@ -215,53 +132,6 @@ where Ok(T::from_clap(&matches)) } -/// A helper function that initializes and runs the node -pub fn run( - mut config: Configuration, - run_cmd: RunCmd, - new_light: FNL, - new_full: FNF, - spec_factory: F, - version: &VersionInfo, -) -> error::Result<()> -where - F: FnOnce(&str) -> Result>, String>, - FNL: FnOnce(Configuration) -> Result, - FNF: FnOnce(Configuration) -> Result, - G: RuntimeGenesis, - E: ChainSpecExtension, - SL: AbstractService + Unpin, - SF: AbstractService + Unpin, -{ - init(&run_cmd.shared_params, version)?; - init_config(&mut config, &run_cmd.shared_params, version, spec_factory)?; - run_cmd.run(config, new_light, new_full, version) -} - -/// A helper function that initializes and runs any of the subcommand variants of `CoreParams`. -pub fn run_subcommand( - mut config: Configuration, - subcommand: Subcommand, - spec_factory: F, - builder: B, - version: &VersionInfo, -) -> error::Result<()> -where - F: FnOnce(&str) -> Result>, String>, - B: FnOnce(Configuration) -> Result, - G: RuntimeGenesis, - E: ChainSpecExtension, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, -{ - let shared_params = subcommand.get_shared_params(); - init(shared_params, version)?; - init_config(&mut config, shared_params, version, spec_factory)?; - subcommand.run(config, builder) -} - /// Initialize substrate. This must be done only once. /// /// This method: @@ -269,7 +139,7 @@ where /// 1. Set the panic handler /// 2. Raise the FD limit /// 3. Initialize the logger -pub fn init(shared_params: &SharedParams, version: &VersionInfo) -> error::Result<()> { +pub fn init(logger_pattern: &str, version: &VersionInfo) -> error::Result<()> { let full_version = sc_service::config::full_version_from_strs( version.version, version.commit @@ -277,425 +147,11 @@ pub fn init(shared_params: &SharedParams, version: &VersionInfo) -> error::Resul sp_panic_handler::set(version.support_url, &full_version); fdlimit::raise_fd_limit(); - init_logger(shared_params.log.as_ref().map(|v| v.as_ref()).unwrap_or("")); - - Ok(()) -} - -/// Initialize the given `config`. -/// -/// This will load the chain spec, set the `config_dir` and the `database_dir`. 
-pub fn init_config( - config: &mut Configuration, - shared_params: &SharedParams, - version: &VersionInfo, - spec_factory: F, -) -> error::Result<()> where - F: FnOnce(&str) -> Result>, String>, - G: RuntimeGenesis, - E: ChainSpecExtension, -{ - load_spec(config, shared_params, spec_factory)?; - - if config.config_dir.is_none() { - config.config_dir = Some(base_path(&shared_params, version)); - } - - if config.database.is_none() { - config.database = Some(DatabaseConfig::Path { - path: config - .in_chain_config_dir(DEFAULT_DB_CONFIG_PATH) - .expect("We provided a base_path/config_dir."), - cache_size: None, - }); - } - - Ok(()) -} - -/// Run the node -/// -/// Builds and runs either a full or a light node, depending on the `role` within the `Configuration`. -pub fn run_node( - config: Configuration, - new_light: FNL, - new_full: FNF, - version: &VersionInfo, -) -> error::Result<()> -where - FNL: FnOnce(Configuration) -> Result, - FNF: FnOnce(Configuration) -> Result, - G: RuntimeGenesis, - E: ChainSpecExtension, - SL: AbstractService + Unpin, - SF: AbstractService + Unpin, -{ - info!("{}", version.name); - info!(" version {}", config.full_version()); - info!(" by {}, {}-{}", version.author, version.copyright_start_year, Local::today().year()); - info!("Chain specification: {}", config.expect_chain_spec().name()); - info!("Node name: {}", config.name); - info!("Roles: {}", display_role(&config)); - - match config.roles { - ServiceRoles::LIGHT => run_service_until_exit( - config, - new_light, - ), - _ => run_service_until_exit( - config, - new_full, - ), - } -} - -/// Returns a string displaying the node role, special casing the sentry mode -/// (returning `SENTRY`), since the node technically has an `AUTHORITY` role but -/// doesn't participate. -pub fn display_role(config: &Configuration) -> String { - if config.sentry_mode { - "SENTRY".to_string() - } else { - format!("{:?}", config.roles) - } -} - -/// Fill the given `PoolConfiguration` by looking at the cli parameters. -fn fill_transaction_pool_configuration( - options: &mut Configuration, - params: TransactionPoolParams, -) -> error::Result<()> { - // ready queue - options.transaction_pool.ready.count = params.pool_limit; - options.transaction_pool.ready.total_bytes = params.pool_kbytes * 1024; - - // future queue - let factor = 10; - options.transaction_pool.future.count = params.pool_limit / factor; - options.transaction_pool.future.total_bytes = params.pool_kbytes * 1024 / factor; + init_logger(logger_pattern); Ok(()) } -/// Fill the given `NetworkConfiguration` by looking at the cli parameters. 
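// Aside (illustration, not part of the diff): with the re-worked `init` above, callers pass the
// logger pattern directly instead of a `SharedParams` reference. The field values below mirror
// the `TEST_VERSION_INFO` used in the tests further down; the `sc_cli::` path and the "info"
// pattern are assumptions for the sketch.
//
//     let version = sc_cli::VersionInfo {
//         name: "node-test",
//         version: "0.1.0",
//         commit: "some_commit",
//         executable_name: "node-test",
//         description: "description",
//         author: "author",
//         support_url: "http://example.org",
//         copyright_start_year: 2020,
//     };
//     // Sets the panic handler, raises the FD limit and initializes the logger.
//     sc_cli::init("info", &version)?;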
-fn fill_network_configuration( - cli: NetworkConfigurationParams, - config_path: PathBuf, - config: &mut NetworkConfiguration, - client_id: String, - is_dev: bool, -) -> error::Result<()> { - config.boot_nodes.extend(cli.bootnodes.into_iter()); - config.config_path = Some(config_path.to_string_lossy().into()); - config.net_config_path = config.config_path.clone(); - - config.reserved_nodes.extend(cli.reserved_nodes.into_iter()); - if cli.reserved_only { - config.non_reserved_mode = NonReservedPeerMode::Deny; - } - - config.sentry_nodes.extend(cli.sentry_nodes.into_iter()); - - for addr in cli.listen_addr.iter() { - let addr = addr.parse().ok().ok_or(error::Error::InvalidListenMultiaddress)?; - config.listen_addresses.push(addr); - } - - if config.listen_addresses.is_empty() { - let port = match cli.port { - Some(port) => port, - None => 30333, - }; - - config.listen_addresses = vec![ - iter::once(Protocol::Ip4(Ipv4Addr::new(0, 0, 0, 0))) - .chain(iter::once(Protocol::Tcp(port))) - .collect() - ]; - } - - config.client_version = client_id; - config.node_key = node_key::node_key_config(cli.node_key_params, &config.net_config_path)?; - - config.in_peers = cli.in_peers; - config.out_peers = cli.out_peers; - - config.transport = TransportConfig::Normal { - enable_mdns: !is_dev && !cli.no_mdns, - allow_private_ipv4: !cli.no_private_ipv4, - wasm_external_transport: None, - use_yamux_flow_control: cli.use_yamux_flow_control - }; - - config.max_parallel_downloads = cli.max_parallel_downloads; - - Ok(()) -} - -#[cfg(not(target_os = "unknown"))] -fn input_keystore_password() -> Result { - rpassword::read_password_from_tty(Some("Keystore password: ")) - .map_err(|e| format!("{:?}", e)) -} - -/// Use in memory keystore config when it is not required at all. -pub fn fill_config_keystore_in_memory(config: &mut sc_service::Configuration) - -> Result<(), String> -{ - match &mut config.keystore { - cfg @ KeystoreConfig::None => { *cfg = KeystoreConfig::InMemory; Ok(()) }, - _ => Err("Keystore config specified when it should not be!".into()), - } -} - -/// Fill the password field of the given config instance. -fn fill_config_keystore_password_and_path( - config: &mut sc_service::Configuration, - cli: &RunCmd, -) -> Result<(), String> { - let password = if cli.password_interactive { - #[cfg(not(target_os = "unknown"))] - { - Some(input_keystore_password()?.into()) - } - #[cfg(target_os = "unknown")] - None - } else if let Some(ref file) = cli.password_filename { - Some(fs::read_to_string(file).map_err(|e| format!("{}", e))?.into()) - } else if let Some(ref password) = cli.password { - Some(password.clone().into()) - } else { - None - }; - - let path = cli.keystore_path.clone().or( - config.in_chain_config_dir(DEFAULT_KEYSTORE_CONFIG_PATH) - ); - - config.keystore = KeystoreConfig::Path { - path: path.ok_or_else(|| "No `base_path` provided to create keystore path!")?, - password, - }; - - Ok(()) -} - -/// Put block import CLI params into `config` object. -pub fn fill_import_params( - config: &mut Configuration, - cli: &ImportParams, - role: sc_service::Roles, - is_dev: bool, -) -> error::Result<()> -where - G: RuntimeGenesis, -{ - if let Some(DatabaseConfig::Path { ref mut cache_size, .. }) = config.database { - *cache_size = Some(cli.database_cache_size); - } - - config.state_cache_size = cli.state_cache_size; - - // by default we disable pruning if the node is an authority (i.e. - // `ArchiveAll`), otherwise we keep state for the last 256 blocks. 
if the - // node is an authority and pruning is enabled explicitly, then we error - // unless `unsafe_pruning` is set. - config.pruning = match &cli.pruning { - Some(ref s) if s == "archive" => PruningMode::ArchiveAll, - None if role == sc_service::Roles::AUTHORITY => PruningMode::ArchiveAll, - None => PruningMode::default(), - Some(s) => { - if role == sc_service::Roles::AUTHORITY && !cli.unsafe_pruning { - return Err(error::Error::Input( - "Validators should run with state pruning disabled (i.e. archive). \ - You can ignore this check with `--unsafe-pruning`.".to_string() - )); - } - - PruningMode::keep_blocks(s.parse() - .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string()))? - ) - }, - }; - - config.wasm_method = cli.wasm_method.into(); - - let exec = &cli.execution_strategies; - let exec_all_or = |strat: ExecutionStrategy, default: ExecutionStrategy| { - exec.execution.unwrap_or(if strat == default && is_dev { - ExecutionStrategy::Native - } else { - strat - }).into() - }; - - config.execution_strategies = ExecutionStrategies { - syncing: exec_all_or(exec.execution_syncing, DEFAULT_EXECUTION_SYNCING), - importing: exec_all_or(exec.execution_import_block, DEFAULT_EXECUTION_IMPORT_BLOCK), - block_construction: - exec_all_or(exec.execution_block_construction, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION), - offchain_worker: - exec_all_or(exec.execution_offchain_worker, DEFAULT_EXECUTION_OFFCHAIN_WORKER), - other: exec_all_or(exec.execution_other, DEFAULT_EXECUTION_OTHER), - }; - Ok(()) -} - -/// Update and prepare a `Configuration` with command line parameters of `RunCmd` and `VersionInfo` -pub fn update_config_for_running_node( - mut config: &mut Configuration, - cli: RunCmd, -) -> error::Result<()> -where - G: RuntimeGenesis, -{ - fill_config_keystore_password_and_path(&mut config, &cli)?; - - let keyring = cli.get_keyring(); - let is_dev = cli.shared_params.dev; - let is_light = cli.light; - let is_authority = (cli.validator || cli.sentry || is_dev || keyring.is_some()) - && !is_light; - let role = - if is_light { - sc_service::Roles::LIGHT - } else if is_authority { - sc_service::Roles::AUTHORITY - } else { - sc_service::Roles::FULL - }; - - fill_import_params(&mut config, &cli.import_params, role, is_dev)?; - - config.name = match (cli.name.as_ref(), keyring) { - (Some(name), _) => name.to_string(), - (_, Some(keyring)) => keyring.to_string(), - (None, None) => generate_node_name(), - }; - if let Err(msg) = node_key::is_node_name_valid(&config.name) { - return Err(error::Error::Input( - format!("Invalid node name '{}'. Reason: {}. If unsure, use none.", - config.name, - msg, - ) - )); - } - - // set sentry mode (i.e. 
act as an authority but **never** actively participate) - config.sentry_mode = cli.sentry; - - config.offchain_worker = match (cli.offchain_worker, role) { - (params::OffchainWorkerEnabled::WhenValidating, sc_service::Roles::AUTHORITY) => true, - (params::OffchainWorkerEnabled::Always, _) => true, - (params::OffchainWorkerEnabled::Never, _) => false, - (params::OffchainWorkerEnabled::WhenValidating, _) => false, - }; - - config.roles = role; - config.disable_grandpa = cli.no_grandpa; - - let client_id = config.client_id(); - fill_network_configuration( - cli.network_config, - config.in_chain_config_dir(DEFAULT_NETWORK_CONFIG_PATH).expect("We provided a basepath"), - &mut config.network, - client_id, - is_dev, - )?; - - fill_transaction_pool_configuration(&mut config, cli.pool_config)?; - - config.dev_key_seed = keyring - .map(|a| format!("//{}", a)).or_else(|| { - if is_dev && !is_light { - Some("//Alice".into()) - } else { - None - } - }); - - if config.rpc_http.is_none() || cli.rpc_port.is_some() { - let rpc_interface: &str = interface_str(cli.rpc_external, cli.unsafe_rpc_external, cli.validator)?; - config.rpc_http = Some(parse_address(&format!("{}:{}", rpc_interface, 9933), cli.rpc_port)?); - } - if config.rpc_ws.is_none() || cli.ws_port.is_some() { - let ws_interface: &str = interface_str(cli.ws_external, cli.unsafe_ws_external, cli.validator)?; - config.rpc_ws = Some(parse_address(&format!("{}:{}", ws_interface, 9944), cli.ws_port)?); - } - - if config.grafana_port.is_none() || cli.grafana_port.is_some() { - let grafana_interface: &str = if cli.grafana_external { "0.0.0.0" } else { "127.0.0.1" }; - config.grafana_port = Some( - parse_address(&format!("{}:{}", grafana_interface, 9955), cli.grafana_port)? - ); - } - - config.rpc_ws_max_connections = cli.ws_max_connections; - config.rpc_cors = cli.rpc_cors.unwrap_or_else(|| if is_dev { - log::warn!("Running in --dev mode, RPC CORS has been disabled."); - Cors::All - } else { - Cors::List(vec![ - "http://localhost:*".into(), - "http://127.0.0.1:*".into(), - "https://localhost:*".into(), - "https://127.0.0.1:*".into(), - "https://polkadot.js.org".into(), - "https://substrate-ui.parity.io".into(), - ]) - }).into(); - - // Override telemetry - if cli.no_telemetry { - config.telemetry_endpoints = None; - } else if !cli.telemetry_endpoints.is_empty() { - config.telemetry_endpoints = Some(TelemetryEndpoints::new(cli.telemetry_endpoints)); - } - - config.tracing_targets = cli.import_params.tracing_targets.into(); - config.tracing_receiver = cli.import_params.tracing_receiver.into(); - - // Imply forced authoring on --dev - config.force_authoring = cli.shared_params.dev || cli.force_authoring; - - Ok(()) -} - -fn interface_str( - is_external: bool, - is_unsafe_external: bool, - is_validator: bool, -) -> Result<&'static str, error::Error> { - if is_external && is_validator { - return Err(error::Error::Input("--rpc-external and --ws-external options shouldn't be \ - used if the node is running as a validator. Use `--unsafe-rpc-external` if you understand \ - the risks. 
See the options description for more information.".to_owned())); - } - - if is_external || is_unsafe_external { - log::warn!("It isn't safe to expose RPC publicly without a proxy server that filters \ - available set of RPC methods."); - - Ok("0.0.0.0") - } else { - Ok("127.0.0.1") - } -} - -fn parse_address( - address: &str, - port: Option, -) -> Result { - let mut address: SocketAddr = address.parse().map_err( - |_| format!("Invalid address: {}", address) - )?; - if let Some(port) = port { - address.set_port(port); - } - - Ok(address) -} - /// Initialize the logger pub fn init_logger(pattern: &str) { use ansi_term::Colour; @@ -765,116 +221,3 @@ fn kill_color(s: &str) -> String { } RE.replace_all(s, "").to_string() } - -#[cfg(test)] -mod tests { - use super::*; - - const TEST_VERSION_INFO: &'static VersionInfo = &VersionInfo { - name: "node-test", - version: "0.1.0", - commit: "some_commit", - executable_name: "node-test", - description: "description", - author: "author", - support_url: "http://example.org", - copyright_start_year: 2020, - }; - - #[test] - fn keystore_path_is_generated_correctly() { - let chain_spec = ChainSpec::from_genesis( - "test", - "test-id", - || (), - Vec::new(), - None, - None, - None, - None::<()>, - ); - - for keystore_path in vec![None, Some("/keystore/path")] { - let args: Vec<&str> = vec![]; - let mut run_cmds = RunCmd::from_iter(args); - run_cmds.keystore_path = keystore_path.clone().map(PathBuf::from); - - let mut node_config = Configuration::default(); - node_config.config_dir = Some(PathBuf::from("/test/path")); - node_config.chain_spec = Some(chain_spec.clone()); - update_config_for_running_node( - &mut node_config, - run_cmds.clone(), - ).unwrap(); - - let expected_path = match keystore_path { - Some(path) => PathBuf::from(path), - None => PathBuf::from("/test/path/chains/test-id/keystore"), - }; - - assert_eq!(expected_path, node_config.keystore.path().unwrap().to_owned()); - } - } - - #[test] - fn ensure_load_spec_provide_defaults() { - let chain_spec = ChainSpec::from_genesis( - "test", - "test-id", - || (), - vec!["boo".to_string()], - Some(TelemetryEndpoints::new(vec![("foo".to_string(), 42)])), - None, - None, - None::<()>, - ); - - let args: Vec<&str> = vec![]; - let cli = RunCmd::from_iter(args); - - let mut config = Configuration::new(TEST_VERSION_INFO); - load_spec(&mut config, &cli.shared_params, |_| Ok(Some(chain_spec))).unwrap(); - - assert!(config.chain_spec.is_some()); - assert!(!config.network.boot_nodes.is_empty()); - assert!(config.telemetry_endpoints.is_some()); - } - - #[test] - fn ensure_update_config_for_running_node_provides_defaults() { - let chain_spec = ChainSpec::from_genesis( - "test", - "test-id", - || (), - vec![], - None, - None, - None, - None::<()>, - ); - - let args: Vec<&str> = vec![]; - let cli = RunCmd::from_iter(args); - - let mut config = Configuration::new(TEST_VERSION_INFO); - init(&cli.shared_params, &TEST_VERSION_INFO).unwrap(); - init_config( - &mut config, - &cli.shared_params, - &TEST_VERSION_INFO, - |_| Ok(Some(chain_spec)), - ).unwrap(); - update_config_for_running_node(&mut config, cli).unwrap(); - - assert!(config.config_dir.is_some()); - assert!(config.database.is_some()); - if let Some(DatabaseConfig::Path { ref cache_size, .. 
}) = config.database { - assert!(cache_size.is_some()); - } else { - panic!("invalid config.database variant"); - } - assert!(!config.name.is_empty()); - assert!(config.network.config_path.is_some()); - assert!(!config.network.listen_addresses.is_empty()); - } -} diff --git a/client/cli/src/node_key.rs b/client/cli/src/node_key.rs deleted file mode 100644 index 4401481ca56ce1ca171cc8ab9c9ade5ee5874ee7..0000000000000000000000000000000000000000 --- a/client/cli/src/node_key.rs +++ /dev/null @@ -1,209 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use sc_network::{ - self, - config::{ - NodeKeyConfig, - }, -}; -use sp_core::H256; -use regex::Regex; -use std::{path::{Path, PathBuf}, str::FromStr}; -use crate::error; -use crate::params::{NodeKeyParams, NodeKeyType}; - -/// The file name of the node's Ed25519 secret key inside the chain-specific -/// network config directory, if neither `--node-key` nor `--node-key-file` -/// is specified in combination with `--node-key-type=ed25519`. -const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; - -/// Check whether a node name is considered as valid -pub fn is_node_name_valid(_name: &str) -> Result<(), &str> { - let name = _name.to_string(); - if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { - return Err("Node name too long"); - } - - let invalid_chars = r"[\\.@]"; - let re = Regex::new(invalid_chars).unwrap(); - if re.is_match(&name) { - return Err("Node name should not contain invalid chars such as '.' and '@'"); - } - - let invalid_patterns = r"(https?:\\/+)?(www)+"; - let re = Regex::new(invalid_patterns).unwrap(); - if re.is_match(&name) { - return Err("Node name should not contain urls"); - } - - Ok(()) -} - -/// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context -/// of an optional network config storage directory. -pub fn node_key_config

<P>(params: NodeKeyParams, net_config_dir: &Option<P>)
-	-> error::Result<NodeKeyConfig>
-where
-	P: AsRef<Path>
-{
-	match params.node_key_type {
-		NodeKeyType::Ed25519 =>
-			params.node_key.as_ref().map(parse_ed25519_secret).unwrap_or_else(||
-				Ok(params.node_key_file
-					.or_else(|| net_config_file(net_config_dir, NODE_KEY_ED25519_FILE))
-					.map(sc_network::config::Secret::File)
-					.unwrap_or(sc_network::config::Secret::New)))
-				.map(NodeKeyConfig::Ed25519)
-	}
-}
-
-/// Create an error caused by an invalid node key argument.
-fn invalid_node_key(e: impl std::fmt::Display) -> error::Error {
-	error::Error::Input(format!("Invalid node key: {}", e))
-}
-
-/// Parse a Ed25519 secret key from a hex string into a `sc_network::Secret`.
-fn parse_ed25519_secret(hex: &String) -> error::Result<sc_network::config::Ed25519Secret> {
-	H256::from_str(&hex).map_err(invalid_node_key).and_then(|bytes|
-		sc_network::config::identity::ed25519::SecretKey::from_bytes(bytes)
-			.map(sc_network::config::Secret::Input)
-			.map_err(invalid_node_key))
-}
-
-fn net_config_file<P>(net_config_dir: &Option<P>

, name: &str) -> Option -where - P: AsRef -{ - net_config_dir.as_ref().map(|d| d.as_ref().join(name)) -} - -#[cfg(test)] -mod tests { - use sc_network::config::identity::ed25519; - use super::*; - - #[test] - fn tests_node_name_good() { - assert!(is_node_name_valid("short name").is_ok()); - } - - #[test] - fn tests_node_name_bad() { - assert!(is_node_name_valid("long names are not very cool for the ui").is_err()); - assert!(is_node_name_valid("Dots.not.Ok").is_err()); - assert!(is_node_name_valid("http://visit.me").is_err()); - assert!(is_node_name_valid("https://visit.me").is_err()); - assert!(is_node_name_valid("www.visit.me").is_err()); - assert!(is_node_name_valid("email@domain").is_err()); - } - - #[test] - fn test_node_key_config_input() { - fn secret_input(net_config_dir: Option) -> error::Result<()> { - NodeKeyType::variants().iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - let sk = match node_key_type { - NodeKeyType::Ed25519 => ed25519::SecretKey::generate().as_ref().to_vec() - }; - let params = NodeKeyParams { - node_key_type, - node_key: Some(format!("{:x}", H256::from_slice(sk.as_ref()))), - node_key_file: None - }; - node_key_config(params, &net_config_dir).and_then(|c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) - if node_key_type == NodeKeyType::Ed25519 && - &sk[..] == ski.as_ref() => Ok(()), - _ => Err(error::Error::Input("Unexpected node key config".into())) - }) - }) - } - - assert!(secret_input(None).is_ok()); - assert!(secret_input(Some("x".to_string())).is_ok()); - } - - #[test] - fn test_node_key_config_file() { - fn secret_file(net_config_dir: Option) -> error::Result<()> { - NodeKeyType::variants().iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - let tmp = tempfile::Builder::new().prefix("alice").tempdir()?; - let file = tmp.path().join(format!("{}_mysecret", t)).to_path_buf(); - let params = NodeKeyParams { - node_key_type, - node_key: None, - node_key_file: Some(file.clone()) - }; - node_key_config(params, &net_config_dir).and_then(|c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if node_key_type == NodeKeyType::Ed25519 && f == &file => Ok(()), - _ => Err(error::Error::Input("Unexpected node key config".into())) - }) - }) - } - - assert!(secret_file(None).is_ok()); - assert!(secret_file(Some("x".to_string())).is_ok()); - } - - #[test] - fn test_node_key_config_default() { - fn with_def_params(f: F) -> error::Result<()> - where - F: Fn(NodeKeyParams) -> error::Result<()> - { - NodeKeyType::variants().iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - f(NodeKeyParams { - node_key_type, - node_key: None, - node_key_file: None - }) - }) - } - - fn no_config_dir() -> error::Result<()> { - with_def_params(|params| { - let typ = params.node_key_type; - node_key_config::(params, &None) - .and_then(|c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::New) - if typ == NodeKeyType::Ed25519 => Ok(()), - _ => Err(error::Error::Input("Unexpected node key config".into())) - }) - }) - } - - fn some_config_dir(net_config_dir: String) -> error::Result<()> { - with_def_params(|params| { - let dir = PathBuf::from(net_config_dir.clone()); - let typ = params.node_key_type; - node_key_config(params, &Some(net_config_dir.clone())) - .and_then(move |c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if typ == NodeKeyType::Ed25519 && - f == &dir.join(NODE_KEY_ED25519_FILE) 
=> Ok(()), - _ => Err(error::Error::Input("Unexpected node key config".into())) - }) - }) - } - - assert!(no_config_dir().is_ok()); - assert!(some_config_dir("x".to_string()).is_ok()); - } -} diff --git a/client/cli/src/params.rs b/client/cli/src/params.rs deleted file mode 100644 index aaa46c0f63813d15c61ec214bb87cc89607f112c..0000000000000000000000000000000000000000 --- a/client/cli/src/params.rs +++ /dev/null @@ -1,1198 +0,0 @@ -// Copyright 2018-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use std::{str::FromStr, path::PathBuf}; -use structopt::{StructOpt, clap::arg_enum}; -use sc_service::{ - AbstractService, Configuration, ChainSpecExtension, RuntimeGenesis, ServiceBuilderCommand, - config::DatabaseConfig, -}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; -use crate::VersionInfo; -use crate::error; -use std::fmt::Debug; -use log::info; -use sc_network::config::build_multiaddr; -use std::io; -use std::fs; -use std::io::{Read, Write, Seek}; -use sp_runtime::generic::BlockId; -use crate::runtime::run_until_exit; -use crate::node_key::node_key_config; -use crate::execution_strategy::*; - -pub use crate::execution_strategy::ExecutionStrategy; - -impl Into for ExecutionStrategy { - fn into(self) -> sc_client_api::ExecutionStrategy { - match self { - ExecutionStrategy::Native => sc_client_api::ExecutionStrategy::NativeWhenPossible, - ExecutionStrategy::Wasm => sc_client_api::ExecutionStrategy::AlwaysWasm, - ExecutionStrategy::Both => sc_client_api::ExecutionStrategy::Both, - ExecutionStrategy::NativeElseWasm => sc_client_api::ExecutionStrategy::NativeElseWasm, - } - } -} - -#[allow(missing_docs)] -mod wasm_execution_method { - use super::*; - - arg_enum! { - /// How to execute Wasm runtime code - #[derive(Debug, Clone, Copy)] - pub enum WasmExecutionMethod { - // Uses an interpreter. - Interpreted, - // Uses a compiled runtime. - Compiled, - } - } - - impl WasmExecutionMethod { - /// Returns list of variants that are not disabled by feature flags. - pub fn enabled_variants() -> Vec<&'static str> { - Self::variants() - .iter() - .cloned() - .filter(|&name| cfg!(feature = "wasmtime") || name != "Compiled") - .collect() - } - } -} - -pub use wasm_execution_method::WasmExecutionMethod; - -impl Into for WasmExecutionMethod { - fn into(self) -> sc_service::config::WasmExecutionMethod { - match self { - WasmExecutionMethod::Interpreted => sc_service::config::WasmExecutionMethod::Interpreted, - #[cfg(feature = "wasmtime")] - WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled, - #[cfg(not(feature = "wasmtime"))] - WasmExecutionMethod::Compiled => panic!( - "Substrate must be compiled with \"wasmtime\" feature for compiled Wasm execution" - ), - } - } -} - -arg_enum! { - /// Whether off-chain workers are enabled. 
- #[allow(missing_docs)] - #[derive(Debug, Clone)] - pub enum OffchainWorkerEnabled { - Always, - Never, - WhenValidating, - } -} - -/// Shared parameters used by all `CoreParams`. -#[derive(Debug, StructOpt, Clone)] -pub struct SharedParams { - /// Specify the chain specification (one of dev, local or staging). - #[structopt(long = "chain", value_name = "CHAIN_SPEC")] - pub chain: Option, - - /// Specify the development chain. - #[structopt(long = "dev")] - pub dev: bool, - - /// Specify custom base path. - #[structopt(long = "base-path", short = "d", value_name = "PATH", parse(from_os_str))] - pub base_path: Option, - - /// Sets a custom logging filter. - #[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")] - pub log: Option, -} - -/// Parameters for block import. -#[derive(Debug, StructOpt, Clone)] -pub struct ImportParams { - /// Specify the state pruning mode, a number of blocks to keep or 'archive'. - /// - /// Default is to keep all block states if the node is running as a - /// validator (i.e. 'archive'), otherwise state is only kept for the last - /// 256 blocks. - #[structopt(long = "pruning", value_name = "PRUNING_MODE")] - pub pruning: Option, - - /// Force start with unsafe pruning settings. - /// - /// When running as a validator it is highly recommended to disable state - /// pruning (i.e. 'archive') which is the default. The node will refuse to - /// start as a validator if pruning is enabled unless this option is set. - #[structopt(long = "unsafe-pruning")] - pub unsafe_pruning: bool, - - /// Method for executing Wasm runtime code. - #[structopt( - long = "wasm-execution", - value_name = "METHOD", - possible_values = &WasmExecutionMethod::enabled_variants(), - case_insensitive = true, - default_value = "Interpreted" - )] - pub wasm_method: WasmExecutionMethod, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub execution_strategies: ExecutionStrategies, - - /// Limit the memory the database cache can use. - #[structopt(long = "db-cache", value_name = "MiB", default_value = "1024")] - pub database_cache_size: u32, - - /// Specify the state cache size. - #[structopt(long = "state-cache-size", value_name = "Bytes", default_value = "67108864")] - pub state_cache_size: usize, - - /// Comma separated list of targets for tracing - #[structopt(long = "tracing-targets", value_name = "TARGETS")] - pub tracing_targets: Option, - - /// Receiver to process tracing messages - #[structopt( - long = "tracing-receiver", - value_name = "RECEIVER", - possible_values = &TracingReceiver::variants(), - case_insensitive = true, - default_value = "Log" - )] - pub tracing_receiver: TracingReceiver, -} - -/// Parameters used to create the network configuration. -#[derive(Debug, StructOpt, Clone)] -pub struct NetworkConfigurationParams { - /// Specify a list of bootnodes. - #[structopt(long = "bootnodes", value_name = "URL")] - pub bootnodes: Vec, - - /// Specify a list of reserved node addresses. - #[structopt(long = "reserved-nodes", value_name = "URL")] - pub reserved_nodes: Vec, - - /// Whether to only allow connections to/from reserved nodes. - /// - /// If you are a validator your node might still connect to other validator - /// nodes regardless of whether they are defined as reserved nodes. - #[structopt(long = "reserved-only")] - pub reserved_only: bool, - - /// Specify a list of sentry node public addresses. 
- #[structopt( - long = "sentry-nodes", - value_name = "URL", - conflicts_with_all = &[ "sentry" ] - )] - pub sentry_nodes: Vec, - - /// Listen on this multiaddress. - #[structopt(long = "listen-addr", value_name = "LISTEN_ADDR")] - pub listen_addr: Vec, - - /// Specify p2p protocol TCP port. - /// - /// Only used if --listen-addr is not specified. - #[structopt(long = "port", value_name = "PORT")] - pub port: Option, - - /// Forbid connecting to private IPv4 addresses (as specified in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with - /// `--reserved-nodes` or `--bootnodes`. - #[structopt(long = "no-private-ipv4")] - pub no_private_ipv4: bool, - - /// Specify the number of outgoing connections we're trying to maintain. - #[structopt(long = "out-peers", value_name = "COUNT", default_value = "25")] - pub out_peers: u32, - - /// Specify the maximum number of incoming connections we're accepting. - #[structopt(long = "in-peers", value_name = "COUNT", default_value = "25")] - pub in_peers: u32, - - /// Disable mDNS discovery. - /// - /// By default, the network will use mDNS to discover other nodes on the - /// local network. This disables it. Automatically implied when using --dev. - #[structopt(long = "no-mdns")] - pub no_mdns: bool, - - /// Maximum number of peers to ask the same blocks in parallel. - /// - /// This allows downlading announced blocks from multiple peers. Decrease to save - /// traffic and risk increased latency. - #[structopt(long = "max-parallel-downloads", value_name = "COUNT", default_value = "5")] - pub max_parallel_downloads: u32, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub node_key_params: NodeKeyParams, - - /// Experimental feature flag. - #[structopt(long = "use-yamux-flow-control")] - pub use_yamux_flow_control: bool, -} - -arg_enum! { - #[allow(missing_docs)] - #[derive(Debug, Copy, Clone, PartialEq, Eq)] - pub enum NodeKeyType { - Ed25519 - } -} - -/// Parameters used to create the `NodeKeyConfig`, which determines the keypair -/// used for libp2p networking. -#[derive(Debug, StructOpt, Clone)] -pub struct NodeKeyParams { - /// The secret key to use for libp2p networking. - /// - /// The value is a string that is parsed according to the choice of - /// `--node-key-type` as follows: - /// - /// `ed25519`: - /// The value is parsed as a hex-encoded Ed25519 32 bytes secret key, - /// i.e. 64 hex characters. - /// - /// The value of this option takes precedence over `--node-key-file`. - /// - /// WARNING: Secrets provided as command-line arguments are easily exposed. - /// Use of this option should be limited to development and testing. To use - /// an externally managed secret key, use `--node-key-file` instead. - #[structopt(long = "node-key", value_name = "KEY")] - pub node_key: Option, - - /// The type of secret key to use for libp2p networking. - /// - /// The secret key of the node is obtained as follows: - /// - /// * If the `--node-key` option is given, the value is parsed as a secret key - /// according to the type. See the documentation for `--node-key`. - /// - /// * If the `--node-key-file` option is given, the secret key is read from the - /// specified file. See the documentation for `--node-key-file`. - /// - /// * Otherwise, the secret key is read from a file with a predetermined, - /// type-specific name from the chain-specific network config directory - /// inside the base directory specified by `--base-dir`. 
If this file does - /// not exist, it is created with a newly generated secret key of the - /// chosen type. - /// - /// The node's secret key determines the corresponding public key and hence the - /// node's peer ID in the context of libp2p. - #[structopt( - long = "node-key-type", - value_name = "TYPE", - possible_values = &NodeKeyType::variants(), - case_insensitive = true, - default_value = "Ed25519" - )] - pub node_key_type: NodeKeyType, - - /// The file from which to read the node's secret key to use for libp2p networking. - /// - /// The contents of the file are parsed according to the choice of `--node-key-type` - /// as follows: - /// - /// `ed25519`: - /// The file must contain an unencoded 32 bytes Ed25519 secret key. - /// - /// If the file does not exist, it is created with a newly generated secret key of - /// the chosen type. - #[structopt(long = "node-key-file", value_name = "FILE")] - pub node_key_file: Option, -} - -/// Parameters used to create the pool configuration. -#[derive(Debug, StructOpt, Clone)] -pub struct TransactionPoolParams { - /// Maximum number of transactions in the transaction pool. - #[structopt(long = "pool-limit", value_name = "COUNT", default_value = "8192")] - pub pool_limit: usize, - /// Maximum number of kilobytes of all transactions stored in the pool. - #[structopt(long = "pool-kbytes", value_name = "COUNT", default_value = "20480")] - pub pool_kbytes: usize, -} - -arg_enum! { - #[allow(missing_docs)] - #[derive(Debug, Copy, Clone, PartialEq, Eq)] - pub enum TracingReceiver { - Log, - Telemetry, - Grafana, - } -} - -impl Into for TracingReceiver { - fn into(self) -> sc_tracing::TracingReceiver { - match self { - TracingReceiver::Log => sc_tracing::TracingReceiver::Log, - TracingReceiver::Telemetry => sc_tracing::TracingReceiver::Telemetry, - TracingReceiver::Grafana => sc_tracing::TracingReceiver::Grafana, - } - } -} - -/// Execution strategies parameters. -#[derive(Debug, StructOpt, Clone)] -pub struct ExecutionStrategies { - /// The means of execution used when calling into the runtime while syncing blocks. - #[structopt( - long = "execution-syncing", - value_name = "STRATEGY", - possible_values = &ExecutionStrategy::variants(), - case_insensitive = true, - default_value = DEFAULT_EXECUTION_SYNCING.as_str(), - )] - pub execution_syncing: ExecutionStrategy, - - /// The means of execution used when calling into the runtime while importing blocks. - #[structopt( - long = "execution-import-block", - value_name = "STRATEGY", - possible_values = &ExecutionStrategy::variants(), - case_insensitive = true, - default_value = DEFAULT_EXECUTION_IMPORT_BLOCK.as_str(), - )] - pub execution_import_block: ExecutionStrategy, - - /// The means of execution used when calling into the runtime while constructing blocks. - #[structopt( - long = "execution-block-construction", - value_name = "STRATEGY", - possible_values = &ExecutionStrategy::variants(), - case_insensitive = true, - default_value = DEFAULT_EXECUTION_BLOCK_CONSTRUCTION.as_str(), - )] - pub execution_block_construction: ExecutionStrategy, - - /// The means of execution used when calling into the runtime while using an off-chain worker. 
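// Aside (illustration, not part of the diff): how these per-context strategy flags, together
// with the `--execution` catch-all defined just below, resolve according to the `exec_all_or`
// logic in `fill_import_params` above (and `ImportParams::update_config` in the new file):
//
//     --execution Wasm                 => every context uses the Wasm strategy
//     --execution-syncing Native       => syncing uses Native, other contexts keep their defaults
//     --dev with no strategy flags     => contexts still at their default switch to Native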
- #[structopt( - long = "execution-offchain-worker", - value_name = "STRATEGY", - possible_values = &ExecutionStrategy::variants(), - case_insensitive = true, - default_value = DEFAULT_EXECUTION_OFFCHAIN_WORKER.as_str(), - )] - pub execution_offchain_worker: ExecutionStrategy, - - /// The means of execution used when calling into the runtime while not syncing, importing or constructing blocks. - #[structopt( - long = "execution-other", - value_name = "STRATEGY", - possible_values = &ExecutionStrategy::variants(), - case_insensitive = true, - default_value = DEFAULT_EXECUTION_OTHER.as_str(), - )] - pub execution_other: ExecutionStrategy, - - /// The execution strategy that should be used by all execution contexts. - #[structopt( - long = "execution", - value_name = "STRATEGY", - possible_values = &ExecutionStrategy::variants(), - case_insensitive = true, - conflicts_with_all = &[ - "execution-other", - "execution-offchain-worker", - "execution-block-construction", - "execution-import-block", - "execution-syncing", - ] - )] - pub execution: Option, -} - -/// The `run` command used to run a node. -#[derive(Debug, StructOpt, Clone)] -pub struct RunCmd { - /// Enable validator mode. - /// - /// The node will be started with the authority role and actively - /// participate in any consensus task that it can (e.g. depending on - /// availability of local keys). - #[structopt( - long = "validator", - conflicts_with_all = &[ "sentry" ] - )] - pub validator: bool, - - /// Enable sentry mode. - /// - /// The node will be started with the authority role and participate in - /// consensus tasks as an "observer", it will never actively participate - /// regardless of whether it could (e.g. keys are available locally). This - /// mode is useful as a secure proxy for validators (which would run - /// detached from the network), since we want this node to participate in - /// the full consensus protocols in order to have all needed consensus data - /// available to relay to private nodes. - #[structopt( - long = "sentry", - conflicts_with_all = &[ "validator", "light" ] - )] - pub sentry: bool, - - /// Disable GRANDPA voter when running in validator mode, otherwise disables the GRANDPA observer. - #[structopt(long = "no-grandpa")] - pub no_grandpa: bool, - - /// Experimental: Run in light client mode. - #[structopt(long = "light", conflicts_with = "sentry")] - pub light: bool, - - /// Listen to all RPC interfaces. - /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use a RPC proxy - /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. - /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. - #[structopt(long = "rpc-external")] - pub rpc_external: bool, - - /// Listen to all RPC interfaces. - /// - /// Same as `--rpc-external`. - #[structopt(long = "unsafe-rpc-external")] - pub unsafe_rpc_external: bool, - - /// Listen to all Websocket interfaces. - /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use a RPC proxy - /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. - /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. - #[structopt(long = "ws-external")] - pub ws_external: bool, - - /// Listen to all Websocket interfaces. - /// - /// Same as `--ws-external`. 
- #[structopt(long = "unsafe-ws-external")] - pub unsafe_ws_external: bool, - - /// Listen to all Grafana data source interfaces. - /// - /// Default is local. - #[structopt(long = "grafana-external")] - pub grafana_external: bool, - - /// Specify HTTP RPC server TCP port. - #[structopt(long = "rpc-port", value_name = "PORT")] - pub rpc_port: Option, - - /// Specify WebSockets RPC server TCP port. - #[structopt(long = "ws-port", value_name = "PORT")] - pub ws_port: Option, - - /// Maximum number of WS RPC server connections. - #[structopt(long = "ws-max-connections", value_name = "COUNT")] - pub ws_max_connections: Option, - - /// Specify browser Origins allowed to access the HTTP & WS RPC servers. - /// - /// A comma-separated list of origins (protocol://domain or special `null` - /// value). Value of `all` will disable origin validation. Default is to - /// allow localhost, https://polkadot.js.org and - /// https://substrate-ui.parity.io origins. When running in --dev mode the - /// default is to allow all origins. - #[structopt(long = "rpc-cors", value_name = "ORIGINS", parse(try_from_str = parse_cors))] - pub rpc_cors: Option, - - /// Specify Grafana data source server TCP Port. - #[structopt(long = "grafana-port", value_name = "PORT")] - pub grafana_port: Option, - - /// The human-readable name for this node. - /// - /// The node name will be reported to the telemetry server, if enabled. - #[structopt(long = "name", value_name = "NAME")] - pub name: Option, - - /// Disable connecting to the Substrate telemetry server. - /// - /// Telemetry is on by default on global chains. - #[structopt(long = "no-telemetry")] - pub no_telemetry: bool, - - /// The URL of the telemetry server to connect to. - /// - /// This flag can be passed multiple times as a mean to specify multiple - /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting - /// the least verbosity. If no verbosity level is specified the default is - /// 0. - #[structopt(long = "telemetry-url", value_name = "URL VERBOSITY", parse(try_from_str = parse_telemetry_endpoints))] - pub telemetry_endpoints: Vec<(String, u8)>, - - /// Should execute offchain workers on every block. - /// - /// By default it's only enabled for nodes that are authoring new blocks. - #[structopt( - long = "offchain-worker", - value_name = "ENABLED", - possible_values = &OffchainWorkerEnabled::variants(), - case_insensitive = true, - default_value = "WhenValidating" - )] - pub offchain_worker: OffchainWorkerEnabled, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub import_params: ImportParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub network_config: NetworkConfigurationParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub pool_config: TransactionPoolParams, - - /// Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. - #[structopt(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] - pub alice: bool, - - /// Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] - pub bob: bool, - - /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to keystore. 
- #[structopt(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] - pub charlie: bool, - - /// Shortcut for `--name Dave --validator` with session keys for `Dave` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] - pub dave: bool, - - /// Shortcut for `--name Eve --validator` with session keys for `Eve` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] - pub eve: bool, - - /// Shortcut for `--name Ferdie --validator` with session keys for `Ferdie` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] - pub ferdie: bool, - - /// Shortcut for `--name One --validator` with session keys for `One` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] - pub one: bool, - - /// Shortcut for `--name Two --validator` with session keys for `Two` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] - pub two: bool, - - /// Enable authoring even when offline. - #[structopt(long = "force-authoring")] - pub force_authoring: bool, - - /// Specify custom keystore path. - #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] - pub keystore_path: Option, - - /// Use interactive shell for entering the password used by the keystore. - #[structopt( - long = "password-interactive", - conflicts_with_all = &[ "password", "password-filename" ] - )] - pub password_interactive: bool, - - /// Password used by the keystore. - #[structopt( - long = "password", - conflicts_with_all = &[ "password-interactive", "password-filename" ] - )] - pub password: Option, - - /// File that contains the password used by the keystore. - #[structopt( - long = "password-filename", - value_name = "PATH", - parse(from_os_str), - conflicts_with_all = &[ "password-interactive", "password" ] - )] - pub password_filename: Option -} - -impl RunCmd { - /// Get the `Sr25519Keyring` matching one of the flag - pub fn get_keyring(&self) -> Option { - use sp_keyring::Sr25519Keyring::*; - - if self.alice { Some(Alice) } - else if self.bob { Some(Bob) } - else if self.charlie { Some(Charlie) } - else if self.dave { Some(Dave) } - else if self.eve { Some(Eve) } - else if self.ferdie { Some(Ferdie) } - else if self.one { Some(One) } - else if self.two { Some(Two) } - else { None } - } -} - -/// Default to verbosity level 0, if none is provided. -fn parse_telemetry_endpoints(s: &str) -> Result<(String, u8), Box> { - let pos = s.find(' '); - match pos { - None => { - Ok((s.to_owned(), 0)) - }, - Some(pos_) => { - let verbosity = s[pos_ + 1..].parse()?; - let url = s[..pos_].parse()?; - Ok((url, verbosity)) - } - } -} - -/// CORS setting -/// -/// The type is introduced to overcome `Option>` -/// handling of `structopt`. -#[derive(Clone, Debug)] -pub enum Cors { - /// All hosts allowed - All, - /// Only hosts on the list are allowed. 
- List(Vec), -} - -impl From for Option> { - fn from(cors: Cors) -> Self { - match cors { - Cors::All => None, - Cors::List(list) => Some(list), - } - } -} - -/// Parse cors origins -fn parse_cors(s: &str) -> Result> { - let mut is_all = false; - let mut origins = Vec::new(); - for part in s.split(',') { - match part { - "all" | "*" => { - is_all = true; - break; - }, - other => origins.push(other.to_owned()), - } - } - - Ok(if is_all { Cors::All } else { Cors::List(origins) }) -} - -/// The `build-spec` command used to build a specification. -#[derive(Debug, StructOpt, Clone)] -pub struct BuildSpecCmd { - /// Force raw genesis storage output. - #[structopt(long = "raw")] - pub raw: bool, - - /// Disable adding the default bootnode to the specification. - /// - /// By default the `/ip4/127.0.0.1/tcp/30333/p2p/NODE_PEER_ID` bootnode is added to the - /// specification when no bootnode exists. - #[structopt(long = "disable-default-bootnode")] - pub disable_default_bootnode: bool, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub node_key_params: NodeKeyParams, -} - -/// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a decimal. -#[derive(Debug, Clone)] -pub struct BlockNumber(String); - -impl FromStr for BlockNumber { - type Err = String; - - fn from_str(block_number: &str) -> Result { - if block_number.chars().any(|d| !d.is_digit(10)) { - Err(format!( - "Invalid block number: {}, expected decimal formatted unsigned integer", - block_number - )) - } else { - Ok(Self(block_number.to_owned())) - } - } -} - -impl BlockNumber { - /// Wrapper on top of `std::str::parse` but with `Error` as a `String` - /// - /// See `https://doc.rust-lang.org/std/primitive.str.html#method.parse` for more elaborate - /// documentation. - pub fn parse(&self) -> Result - where - N: FromStr, - N::Err: std::fmt::Debug, - { - self.0 - .parse() - .map_err(|e| format!("BlockNumber: {} parsing failed because of {:?}", self.0, e)) - } -} - -/// The `export-blocks` command used to export blocks. -#[derive(Debug, StructOpt, Clone)] -pub struct ExportBlocksCmd { - /// Output file name or stdout if unspecified. - #[structopt(parse(from_os_str))] - pub output: Option, - - /// Specify starting block number. - /// - /// Default is 1. - #[structopt(long = "from", value_name = "BLOCK")] - pub from: Option, - - /// Specify last block number. - /// - /// Default is best block. - #[structopt(long = "to", value_name = "BLOCK")] - pub to: Option, - - /// Use JSON output rather than binary. - #[structopt(long = "json")] - pub json: bool, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, -} - -/// The `import-blocks` command used to import blocks. -#[derive(Debug, StructOpt, Clone)] -pub struct ImportBlocksCmd { - /// Input file or stdin if unspecified. - #[structopt(parse(from_os_str))] - pub input: Option, - - /// The default number of 64KB pages to ever allocate for Wasm execution. - /// - /// Don't alter this unless you know what you're doing. - #[structopt(long = "default-heap-pages", value_name = "COUNT")] - pub default_heap_pages: Option, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub import_params: ImportParams, -} - -/// The `check-block` command used to validate blocks. 
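// Aside (illustration, not part of the diff): examples of how the removed `parse_cors` and
// `parse_telemetry_endpoints` helpers above interpret their CLI values. The URLs are examples only.
//
//     --rpc-cors all                                        => Cors::All (origin checking disabled)
//     --rpc-cors "http://localhost:*,https://polkadot.js.org" => Cors::List of exactly those origins
//     --telemetry-url "wss://telemetry.example.org/submit 5"  => ("wss://telemetry.example.org/submit", 5)
//     --telemetry-url "wss://telemetry.example.org/submit"    => verbosity defaults to 0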
-#[derive(Debug, StructOpt, Clone)] -pub struct CheckBlockCmd { - /// Block hash or number - #[structopt(value_name = "HASH or NUMBER")] - pub input: String, - - /// The default number of 64KB pages to ever allocate for Wasm execution. - /// - /// Don't alter this unless you know what you're doing. - #[structopt(long = "default-heap-pages", value_name = "COUNT")] - pub default_heap_pages: Option, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub import_params: ImportParams, -} - -/// The `revert` command used revert the chain to a previous state. -#[derive(Debug, StructOpt, Clone)] -pub struct RevertCmd { - /// Number of blocks to revert. - #[structopt(default_value = "256")] - pub num: BlockNumber, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, -} - -/// The `purge-chain` command used to remove the whole chain. -#[derive(Debug, StructOpt, Clone)] -pub struct PurgeChainCmd { - /// Skip interactive prompt by answering yes automatically. - #[structopt(short = "y")] - pub yes: bool, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, -} - -/// All core commands that are provided by default. -/// -/// The core commands are split into multiple subcommands and `Run` is the default subcommand. From -/// the CLI user perspective, it is not visible that `Run` is a subcommand. So, all parameters of -/// `Run` are exported as main executable parameters. -#[derive(Debug, Clone, StructOpt)] -pub enum Subcommand { - /// Build a spec.json file, outputing to stdout. - BuildSpec(BuildSpecCmd), - - /// Export blocks to a file. - ExportBlocks(ExportBlocksCmd), - - /// Import blocks from file. - ImportBlocks(ImportBlocksCmd), - - /// Validate a single block. - CheckBlock(CheckBlockCmd), - - /// Revert chain to the previous state. - Revert(RevertCmd), - - /// Remove the whole chain data. 
- PurgeChain(PurgeChainCmd), -} - -impl Subcommand { - /// Get the shared parameters of a `CoreParams` command - pub fn get_shared_params(&self) -> &SharedParams { - use Subcommand::*; - - match self { - BuildSpec(params) => ¶ms.shared_params, - ExportBlocks(params) => ¶ms.shared_params, - ImportBlocks(params) => ¶ms.shared_params, - CheckBlock(params) => ¶ms.shared_params, - Revert(params) => ¶ms.shared_params, - PurgeChain(params) => ¶ms.shared_params, - } - } - - /// Run any `CoreParams` command - pub fn run( - self, - config: Configuration, - builder: B, - ) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - G: RuntimeGenesis, - E: ChainSpecExtension, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - assert!(config.chain_spec.is_some(), "chain_spec must be present before continuing"); - - match self { - Subcommand::BuildSpec(cmd) => cmd.run(config), - Subcommand::ExportBlocks(cmd) => cmd.run(config, builder), - Subcommand::ImportBlocks(cmd) => cmd.run(config, builder), - Subcommand::CheckBlock(cmd) => cmd.run(config, builder), - Subcommand::PurgeChain(cmd) => cmd.run(config), - Subcommand::Revert(cmd) => cmd.run(config, builder), - } - } -} - -impl RunCmd { - /// Run the command that runs the node - pub fn run( - self, - mut config: Configuration, - new_light: FNL, - new_full: FNF, - version: &VersionInfo, - ) -> error::Result<()> - where - G: RuntimeGenesis, - E: ChainSpecExtension, - FNL: FnOnce(Configuration) -> Result, - FNF: FnOnce(Configuration) -> Result, - SL: AbstractService + Unpin, - SF: AbstractService + Unpin, - { - assert!(config.chain_spec.is_some(), "chain_spec must be present before continuing"); - - crate::update_config_for_running_node(&mut config, self)?; - - crate::run_node(config, new_light, new_full, &version) - } -} - -impl BuildSpecCmd { - /// Run the build-spec command - pub fn run( - self, - config: Configuration, - ) -> error::Result<()> - where - G: RuntimeGenesis, - E: ChainSpecExtension, - { - assert!(config.chain_spec.is_some(), "chain_spec must be present before continuing"); - - info!("Building chain spec"); - let mut spec = config.expect_chain_spec().clone(); - let raw_output = self.raw; - - if spec.boot_nodes().is_empty() && !self.disable_default_bootnode { - let node_key = node_key_config( - self.node_key_params.clone(), - &Some(config - .in_chain_config_dir(crate::DEFAULT_NETWORK_CONFIG_PATH) - .expect("We provided a base_path")), - )?; - let keys = node_key.into_keypair()?; - let peer_id = keys.public().into_peer_id(); - let addr = build_multiaddr![ - Ip4([127, 0, 0, 1]), - Tcp(30333u16), - P2p(peer_id) - ]; - spec.add_boot_node(addr) - } - - let json = sc_service::chain_ops::build_spec(spec, raw_output)?; - - print!("{}", json); - - Ok(()) - } -} - -impl ExportBlocksCmd { - /// Run the export-blocks command - pub fn run( - self, - mut config: Configuration, - builder: B, - ) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - G: RuntimeGenesis, - E: ChainSpecExtension, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - assert!(config.chain_spec.is_some(), "chain_spec must be present before continuing"); - - crate::fill_config_keystore_in_memory(&mut config)?; - - if let DatabaseConfig::Path { ref path, .. 
} = config.expect_database() { - info!("DB path: {}", path.display()); - } - let from = self.from.as_ref().and_then(|f| f.parse().ok()).unwrap_or(1); - let to = self.to.as_ref().and_then(|t| t.parse().ok()); - - let json = self.json; - - let file: Box = match &self.output { - Some(filename) => Box::new(fs::File::create(filename)?), - None => Box::new(io::stdout()), - }; - - run_until_exit(config, |config| { - Ok(builder(config)?.export_blocks(file, from.into(), to, json)) - }) - } -} - -/// Internal trait used to cast to a dynamic type that implements Read and Seek. -trait ReadPlusSeek: Read + Seek {} - -impl ReadPlusSeek for T {} - -impl ImportBlocksCmd { - /// Run the import-blocks command - pub fn run( - self, - mut config: Configuration, - builder: B, - ) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - G: RuntimeGenesis, - E: ChainSpecExtension, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - crate::fill_import_params( - &mut config, - &self.import_params, - sc_service::Roles::FULL, - self.shared_params.dev, - )?; - - let file: Box = match &self.input { - Some(filename) => Box::new(fs::File::open(filename)?), - None => { - let mut buffer = Vec::new(); - io::stdin().read_to_end(&mut buffer)?; - Box::new(io::Cursor::new(buffer)) - }, - }; - - run_until_exit(config, |config| { - Ok(builder(config)?.import_blocks(file, false)) - }) - } -} - -impl CheckBlockCmd { - /// Run the check-block command - pub fn run( - self, - mut config: Configuration, - builder: B, - ) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - G: RuntimeGenesis, - E: ChainSpecExtension, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - assert!(config.chain_spec.is_some(), "chain_spec must be present before continuing"); - - crate::fill_import_params( - &mut config, - &self.import_params, - sc_service::Roles::FULL, - self.shared_params.dev, - )?; - crate::fill_config_keystore_in_memory(&mut config)?; - - let input = if self.input.starts_with("0x") { &self.input[2..] } else { &self.input[..] }; - let block_id = match FromStr::from_str(input) { - Ok(hash) => BlockId::hash(hash), - Err(_) => match self.input.parse::() { - Ok(n) => BlockId::number((n as u32).into()), - Err(_) => return Err(error::Error::Input("Invalid hash or number specified".into())), - } - }; - - let start = std::time::Instant::now(); - run_until_exit(config, |config| { - Ok(builder(config)?.check_block(block_id)) - })?; - println!("Completed in {} ms.", start.elapsed().as_millis()); - - Ok(()) - } -} - -impl PurgeChainCmd { - /// Run the purge command - pub fn run( - self, - mut config: Configuration, - ) -> error::Result<()> - where - G: RuntimeGenesis, - E: ChainSpecExtension, - { - assert!(config.chain_spec.is_some(), "chain_spec must be present before continuing"); - - crate::fill_config_keystore_in_memory(&mut config)?; - - let db_path = match config.expect_database() { - DatabaseConfig::Path { path, .. } => path, - _ => { - eprintln!("Cannot purge custom database implementation"); - return Ok(()); - } - }; - - if !self.yes { - print!("Are you sure to remove {:?}? 
[y/N]: ", &db_path); - io::stdout().flush().expect("failed to flush stdout"); - - let mut input = String::new(); - io::stdin().read_line(&mut input)?; - let input = input.trim(); - - match input.chars().nth(0) { - Some('y') | Some('Y') => {}, - _ => { - println!("Aborted"); - return Ok(()); - }, - } - } - - match fs::remove_dir_all(&db_path) { - Ok(_) => { - println!("{:?} removed.", &db_path); - Ok(()) - }, - Err(ref err) if err.kind() == io::ErrorKind::NotFound => { - eprintln!("{:?} did not exist.", &db_path); - Ok(()) - }, - Err(err) => Result::Err(err.into()) - } - } -} - -impl RevertCmd { - /// Run the revert command - pub fn run( - self, - mut config: Configuration, - builder: B, - ) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - G: RuntimeGenesis, - E: ChainSpecExtension, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - assert!(config.chain_spec.is_some(), "chain_spec must be present before continuing"); - - crate::fill_config_keystore_in_memory(&mut config)?; - - let blocks = self.num.parse()?; - builder(config)?.revert_chain(blocks)?; - - Ok(()) - } -} diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs new file mode 100644 index 0000000000000000000000000000000000000000..95d7623d11da3224284c46a3f78aec7e73fad926 --- /dev/null +++ b/client/cli/src/params/import_params.rs @@ -0,0 +1,194 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use structopt::StructOpt; +use sc_service::{Configuration, RuntimeGenesis, config::DatabaseConfig}; + +use crate::error; +use crate::arg_enums::{ + WasmExecutionMethod, TracingReceiver, ExecutionStrategy, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, + DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, + DEFAULT_EXECUTION_SYNCING +}; +use crate::params::PruningParams; + +/// Parameters for block import. +#[derive(Debug, StructOpt, Clone)] +pub struct ImportParams { + #[allow(missing_docs)] + #[structopt(flatten)] + pub pruning_params: PruningParams, + + /// Force start with unsafe pruning settings. + /// + /// When running as a validator it is highly recommended to disable state + /// pruning (i.e. 'archive') which is the default. The node will refuse to + /// start as a validator if pruning is enabled unless this option is set. + #[structopt(long = "unsafe-pruning")] + pub unsafe_pruning: bool, + + /// Method for executing Wasm runtime code. 
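The attribute block that follows relies on structopt/clap to do the heavy lifting: `possible_values` restricts what the flag accepts, `case_insensitive` relaxes the match, and `default_value` fills the flag in when it is absent. A minimal standalone sketch of that behaviour, using a plain `String` field instead of the patch's `WasmExecutionMethod` enum (the struct and flag here are illustrative stand-ins, assuming structopt 0.3):

```rust
use structopt::StructOpt;

/// Illustrative options struct mirroring the attribute style used by `ImportParams`.
#[derive(Debug, StructOpt)]
struct DemoOpts {
    /// Stand-in for the Wasm execution method flag.
    #[structopt(
        long = "wasm-execution",
        value_name = "METHOD",
        possible_values = &["Interpreted", "Compiled"],
        case_insensitive = true,
        default_value = "Interpreted"
    )]
    wasm_method: String,
}

fn main() {
    // No flag given: the default value is used.
    let defaults = DemoOpts::from_iter(vec!["demo"]);
    assert_eq!(defaults.wasm_method, "Interpreted");

    // `case_insensitive` lets "compiled" satisfy the "Compiled" possible value.
    let explicit = DemoOpts::from_iter(vec!["demo", "--wasm-execution", "compiled"]);
    println!("wasm execution method: {}", explicit.wasm_method);
}
```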
+ #[structopt( + long = "wasm-execution", + value_name = "METHOD", + possible_values = &WasmExecutionMethod::enabled_variants(), + case_insensitive = true, + default_value = "Interpreted" + )] + pub wasm_method: WasmExecutionMethod, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub execution_strategies: ExecutionStrategies, + + /// Limit the memory the database cache can use. + #[structopt(long = "db-cache", value_name = "MiB", default_value = "128")] + pub database_cache_size: u32, + + /// Specify the state cache size. + #[structopt(long = "state-cache-size", value_name = "Bytes", default_value = "67108864")] + pub state_cache_size: usize, + + /// Comma separated list of targets for tracing + #[structopt(long = "tracing-targets", value_name = "TARGETS")] + pub tracing_targets: Option, + + /// Receiver to process tracing messages + #[structopt( + long = "tracing-receiver", + value_name = "RECEIVER", + possible_values = &TracingReceiver::variants(), + case_insensitive = true, + default_value = "Log" + )] + pub tracing_receiver: TracingReceiver, +} + +impl ImportParams { + /// Put block import CLI params into `config` object. + pub fn update_config( + &self, + mut config: &mut Configuration, + role: sc_service::Roles, + is_dev: bool, + ) -> error::Result<()> + where + G: RuntimeGenesis, + { + use sc_client_api::execution_extensions::ExecutionStrategies; + + if let Some(DatabaseConfig::Path { ref mut cache_size, .. }) = config.database { + *cache_size = Some(self.database_cache_size); + } + + config.state_cache_size = self.state_cache_size; + + self.pruning_params.update_config(&mut config, role, self.unsafe_pruning)?; + + config.wasm_method = self.wasm_method.into(); + + let exec = &self.execution_strategies; + let exec_all_or = |strat: ExecutionStrategy, default: ExecutionStrategy| { + exec.execution.unwrap_or(if strat == default && is_dev { + ExecutionStrategy::Native + } else { + strat + }).into() + }; + + config.execution_strategies = ExecutionStrategies { + syncing: exec_all_or(exec.execution_syncing, DEFAULT_EXECUTION_SYNCING), + importing: exec_all_or(exec.execution_import_block, DEFAULT_EXECUTION_IMPORT_BLOCK), + block_construction: + exec_all_or(exec.execution_block_construction, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION), + offchain_worker: + exec_all_or(exec.execution_offchain_worker, DEFAULT_EXECUTION_OFFCHAIN_WORKER), + other: exec_all_or(exec.execution_other, DEFAULT_EXECUTION_OTHER), + }; + + Ok(()) + } +} + +/// Execution strategies parameters. +#[derive(Debug, StructOpt, Clone)] +pub struct ExecutionStrategies { + /// The means of execution used when calling into the runtime while syncing blocks. + #[structopt( + long = "execution-syncing", + value_name = "STRATEGY", + possible_values = &ExecutionStrategy::variants(), + case_insensitive = true, + default_value = DEFAULT_EXECUTION_SYNCING.as_str(), + )] + pub execution_syncing: ExecutionStrategy, + + /// The means of execution used when calling into the runtime while importing blocks. + #[structopt( + long = "execution-import-block", + value_name = "STRATEGY", + possible_values = &ExecutionStrategy::variants(), + case_insensitive = true, + default_value = DEFAULT_EXECUTION_IMPORT_BLOCK.as_str(), + )] + pub execution_import_block: ExecutionStrategy, + + /// The means of execution used when calling into the runtime while constructing blocks. 
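The `exec_all_or` closure in `update_config` above decides the effective strategy for each execution context: an explicit global `--execution` value wins, a per-context flag left at its compiled-in default falls back to Native when `--dev` is set, and any explicitly chosen per-context value is kept. A self-contained sketch of that precedence with stand-in types (the enum and defaults below are illustrative, not the patch's own definitions):

```rust
/// Stand-in for the CLI's ExecutionStrategy arg enum.
#[derive(Clone, Copy, PartialEq, Debug)]
enum Strategy {
    Native,
    Wasm,
    NativeElseWasm,
}

/// Mirrors `exec_all_or`: a global override wins; otherwise, if the per-context value
/// still equals its default and the node runs with `--dev`, prefer Native; otherwise
/// keep the per-context value.
fn resolve(global: Option<Strategy>, per_context: Strategy, default: Strategy, is_dev: bool) -> Strategy {
    global.unwrap_or(if per_context == default && is_dev {
        Strategy::Native
    } else {
        per_context
    })
}

fn main() {
    // --dev with an untouched per-context flag resolves to Native.
    assert_eq!(resolve(None, Strategy::NativeElseWasm, Strategy::NativeElseWasm, true), Strategy::Native);
    // An explicitly chosen per-context strategy survives --dev.
    assert_eq!(resolve(None, Strategy::Wasm, Strategy::NativeElseWasm, true), Strategy::Wasm);
    // A global --execution value overrides everything else.
    assert_eq!(resolve(Some(Strategy::Wasm), Strategy::NativeElseWasm, Strategy::NativeElseWasm, false), Strategy::Wasm);
}
```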
+ #[structopt( + long = "execution-block-construction", + value_name = "STRATEGY", + possible_values = &ExecutionStrategy::variants(), + case_insensitive = true, + default_value = DEFAULT_EXECUTION_BLOCK_CONSTRUCTION.as_str(), + )] + pub execution_block_construction: ExecutionStrategy, + + /// The means of execution used when calling into the runtime while using an off-chain worker. + #[structopt( + long = "execution-offchain-worker", + value_name = "STRATEGY", + possible_values = &ExecutionStrategy::variants(), + case_insensitive = true, + default_value = DEFAULT_EXECUTION_OFFCHAIN_WORKER.as_str(), + )] + pub execution_offchain_worker: ExecutionStrategy, + + /// The means of execution used when calling into the runtime while not syncing, importing or constructing blocks. + #[structopt( + long = "execution-other", + value_name = "STRATEGY", + possible_values = &ExecutionStrategy::variants(), + case_insensitive = true, + default_value = DEFAULT_EXECUTION_OTHER.as_str(), + )] + pub execution_other: ExecutionStrategy, + + /// The execution strategy that should be used by all execution contexts. + #[structopt( + long = "execution", + value_name = "STRATEGY", + possible_values = &ExecutionStrategy::variants(), + case_insensitive = true, + conflicts_with_all = &[ + "execution-other", + "execution-offchain-worker", + "execution-block-construction", + "execution-import-block", + "execution-syncing", + ] + )] + pub execution: Option, +} diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..f684cab336423159c6a18ff492c3d7741c4cfded --- /dev/null +++ b/client/cli/src/params/mod.rs @@ -0,0 +1,67 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +mod import_params; +mod transaction_pool_params; +mod shared_params; +mod node_key_params; +mod network_configuration_params; +mod pruning_params; + +use std::str::FromStr; +use std::fmt::Debug; + +pub use crate::params::import_params::*; +pub use crate::params::transaction_pool_params::*; +pub use crate::params::shared_params::*; +pub use crate::params::node_key_params::*; +pub use crate::params::network_configuration_params::*; +pub use crate::params::pruning_params::*; + +/// Wrapper type of `String` that holds an unsigned integer of arbitrary size, formatted as a decimal. 
+#[derive(Debug, Clone)] +pub struct BlockNumber(String); + +impl FromStr for BlockNumber { + type Err = String; + + fn from_str(block_number: &str) -> Result { + if block_number.chars().any(|d| !d.is_digit(10)) { + Err(format!( + "Invalid block number: {}, expected decimal formatted unsigned integer", + block_number, + )) + } else { + Ok(Self(block_number.to_owned())) + } + } +} + +impl BlockNumber { + /// Wrapper on top of `std::str::parse` but with `Error` as a `String` + /// + /// See `https://doc.rust-lang.org/std/primitive.str.html#method.parse` for more elaborate + /// documentation. + pub fn parse(&self) -> Result + where + N: FromStr, + N::Err: std::fmt::Debug, + { + self.0 + .parse() + .map_err(|e| format!("BlockNumber: {} parsing failed because of {:?}", self.0, e)) + } +} diff --git a/client/cli/src/params/network_configuration_params.rs b/client/cli/src/params/network_configuration_params.rs new file mode 100644 index 0000000000000000000000000000000000000000..eef679d6a615fa4acc9c33598c0cc2c1a0c04d03 --- /dev/null +++ b/client/cli/src/params/network_configuration_params.rs @@ -0,0 +1,160 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::path::PathBuf; +use std::iter; +use std::net::Ipv4Addr; +use structopt::StructOpt; +use sc_network::{ + config::{NonReservedPeerMode, TransportConfig}, multiaddr::Protocol, +}; +use sc_service::{Configuration, RuntimeGenesis}; + +use crate::error; +use crate::params::node_key_params::NodeKeyParams; + +/// Parameters used to create the network configuration. +#[derive(Debug, StructOpt, Clone)] +pub struct NetworkConfigurationParams { + /// Specify a list of bootnodes. + #[structopt(long = "bootnodes", value_name = "URL")] + pub bootnodes: Vec, + + /// Specify a list of reserved node addresses. + #[structopt(long = "reserved-nodes", value_name = "URL")] + pub reserved_nodes: Vec, + + /// Whether to only allow connections to/from reserved nodes. + /// + /// If you are a validator your node might still connect to other validator + /// nodes regardless of whether they are defined as reserved nodes. + #[structopt(long = "reserved-only")] + pub reserved_only: bool, + + /// Specify a list of sentry node public addresses. + #[structopt( + long = "sentry-nodes", + value_name = "URL", + conflicts_with_all = &[ "sentry" ] + )] + pub sentry_nodes: Vec, + + /// Listen on this multiaddress. + #[structopt(long = "listen-addr", value_name = "LISTEN_ADDR")] + pub listen_addr: Vec, + + /// Specify p2p protocol TCP port. + /// + /// Only used if --listen-addr is not specified. + #[structopt(long = "port", value_name = "PORT")] + pub port: Option, + + /// Forbid connecting to private IPv4 addresses (as specified in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with + /// `--reserved-nodes` or `--bootnodes`. 
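Stepping back to the `BlockNumber` wrapper added in `params/mod.rs` above: its `FromStr` impl only validates that every character is a decimal digit, and conversion to a concrete integer type is deferred to `parse` at the call site. A standalone, std-only sketch of that two-step flow (reimplemented here so it runs in isolation; it is not the patch's code verbatim):

```rust
use std::str::FromStr;

/// Standalone stand-in for the CLI's `BlockNumber(String)` wrapper.
#[derive(Debug, Clone)]
struct BlockNumber(String);

impl FromStr for BlockNumber {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Validation only: keep the digits as a string, defer the integer conversion.
        if s.chars().any(|c| !c.is_digit(10)) {
            Err(format!("Invalid block number: {}, expected decimal formatted unsigned integer", s))
        } else {
            Ok(Self(s.to_owned()))
        }
    }
}

impl BlockNumber {
    /// Convert into whichever integer type the caller needs.
    fn parse<N: FromStr>(&self) -> Result<N, String>
    where
        N::Err: std::fmt::Debug,
    {
        self.0
            .parse()
            .map_err(|e| format!("BlockNumber: {} parsing failed because of {:?}", self.0, e))
    }
}

fn main() {
    let n: BlockNumber = "12345".parse().expect("digits only, so validation passes");
    let as_u64: u64 = n.parse().expect("fits into u64");
    assert_eq!(as_u64, 12_345);

    // Non-decimal input is rejected up front, before any integer conversion.
    assert!("0x1f".parse::<BlockNumber>().is_err());
}
```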
+ #[structopt(long = "no-private-ipv4")] + pub no_private_ipv4: bool, + + /// Specify the number of outgoing connections we're trying to maintain. + #[structopt(long = "out-peers", value_name = "COUNT", default_value = "25")] + pub out_peers: u32, + + /// Specify the maximum number of incoming connections we're accepting. + #[structopt(long = "in-peers", value_name = "COUNT", default_value = "25")] + pub in_peers: u32, + + /// Disable mDNS discovery. + /// + /// By default, the network will use mDNS to discover other nodes on the + /// local network. This disables it. Automatically implied when using --dev. + #[structopt(long = "no-mdns")] + pub no_mdns: bool, + + /// Maximum number of peers to ask the same blocks in parallel. + /// + /// This allows downlading announced blocks from multiple peers. Decrease to save + /// traffic and risk increased latency. + #[structopt(long = "max-parallel-downloads", value_name = "COUNT", default_value = "5")] + pub max_parallel_downloads: u32, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub node_key_params: NodeKeyParams, + + /// Experimental feature flag. + #[structopt(long = "use-yamux-flow-control")] + pub use_yamux_flow_control: bool, +} + +impl NetworkConfigurationParams { + /// Fill the given `NetworkConfiguration` by looking at the cli parameters. + pub fn update_config( + &self, + mut config: &mut Configuration, + config_path: PathBuf, + client_id: String, + is_dev: bool, + ) -> error::Result<()> + where + G: RuntimeGenesis, + { + config.network.boot_nodes.extend(self.bootnodes.clone()); + config.network.config_path = Some(config_path.clone()); + config.network.net_config_path = Some(config_path.clone()); + + config.network.reserved_nodes.extend(self.reserved_nodes.clone()); + if self.reserved_only { + config.network.non_reserved_mode = NonReservedPeerMode::Deny; + } + + config.network.sentry_nodes.extend(self.sentry_nodes.clone()); + + for addr in self.listen_addr.iter() { + let addr = addr.parse().ok().ok_or(error::Error::InvalidListenMultiaddress)?; + config.network.listen_addresses.push(addr); + } + + if config.network.listen_addresses.is_empty() { + let port = match self.port { + Some(port) => port, + None => 30333, + }; + + config.network.listen_addresses = vec![ + iter::once(Protocol::Ip4(Ipv4Addr::new(0, 0, 0, 0))) + .chain(iter::once(Protocol::Tcp(port))) + .collect() + ]; + } + + config.network.client_version = client_id; + self.node_key_params.update_config(&mut config, Some(&config_path))?; + + config.network.in_peers = self.in_peers; + config.network.out_peers = self.out_peers; + + config.network.transport = TransportConfig::Normal { + enable_mdns: !is_dev && !self.no_mdns, + allow_private_ipv4: !self.no_private_ipv4, + wasm_external_transport: None, + use_yamux_flow_control: self.use_yamux_flow_control, + }; + + config.network.max_parallel_downloads = self.max_parallel_downloads; + + Ok(()) + } +} diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs new file mode 100644 index 0000000000000000000000000000000000000000..ddc1d6cc21af7ce91715aa64e4b3cbc5bff06722 --- /dev/null +++ b/client/cli/src/params/node_key_params.rs @@ -0,0 +1,244 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. 
+ +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::{path::PathBuf, str::FromStr}; +use structopt::StructOpt; +use sc_service::{Configuration, RuntimeGenesis}; +use sc_network::config::NodeKeyConfig; +use sp_core::H256; + +use crate::error; +use crate::arg_enums::NodeKeyType; + +/// The file name of the node's Ed25519 secret key inside the chain-specific +/// network config directory, if neither `--node-key` nor `--node-key-file` +/// is specified in combination with `--node-key-type=ed25519`. +const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; + +/// Parameters used to create the `NodeKeyConfig`, which determines the keypair +/// used for libp2p networking. +#[derive(Debug, StructOpt, Clone)] +pub struct NodeKeyParams { + /// The secret key to use for libp2p networking. + /// + /// The value is a string that is parsed according to the choice of + /// `--node-key-type` as follows: + /// + /// `ed25519`: + /// The value is parsed as a hex-encoded Ed25519 32 bytes secret key, + /// i.e. 64 hex characters. + /// + /// The value of this option takes precedence over `--node-key-file`. + /// + /// WARNING: Secrets provided as command-line arguments are easily exposed. + /// Use of this option should be limited to development and testing. To use + /// an externally managed secret key, use `--node-key-file` instead. + #[structopt(long = "node-key", value_name = "KEY")] + pub node_key: Option, + + /// The type of secret key to use for libp2p networking. + /// + /// The secret key of the node is obtained as follows: + /// + /// * If the `--node-key` option is given, the value is parsed as a secret key + /// according to the type. See the documentation for `--node-key`. + /// + /// * If the `--node-key-file` option is given, the secret key is read from the + /// specified file. See the documentation for `--node-key-file`. + /// + /// * Otherwise, the secret key is read from a file with a predetermined, + /// type-specific name from the chain-specific network config directory + /// inside the base directory specified by `--base-dir`. If this file does + /// not exist, it is created with a newly generated secret key of the + /// chosen type. + /// + /// The node's secret key determines the corresponding public key and hence the + /// node's peer ID in the context of libp2p. + #[structopt( + long = "node-key-type", + value_name = "TYPE", + possible_values = &NodeKeyType::variants(), + case_insensitive = true, + default_value = "Ed25519" + )] + pub node_key_type: NodeKeyType, + + /// The file from which to read the node's secret key to use for libp2p networking. + /// + /// The contents of the file are parsed according to the choice of `--node-key-type` + /// as follows: + /// + /// `ed25519`: + /// The file must contain an unencoded 32 bytes Ed25519 secret key. + /// + /// If the file does not exist, it is created with a newly generated secret key of + /// the chosen type. + #[structopt(long = "node-key-file", value_name = "FILE")] + pub node_key_file: Option, +} + +impl NodeKeyParams { + /// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context + /// of an optional network config storage directory. 
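The resolution order spelled out in the doc comments above (an inline `--node-key` wins, then `--node-key-file`, then the chain-specific `secret_ed25519` file, and finally a freshly generated key) is what `update_config` below implements for the Ed25519 case. A std-only sketch of that precedence, with a simplified enum standing in for `sc_network::config::Secret` and a `String` standing in for the parsed secret key:

```rust
use std::path::PathBuf;

/// Simplified stand-in for the node-key secret source.
#[derive(Debug, PartialEq)]
enum Secret {
    /// Key material passed directly on the command line (--node-key).
    Input(String),
    /// Key read from (or generated into) a file (--node-key-file or the default file).
    File(PathBuf),
    /// No source known at all: generate a fresh key in memory.
    New,
}

const NODE_KEY_ED25519_FILE: &str = "secret_ed25519";

fn resolve_node_key(
    node_key: Option<String>,
    node_key_file: Option<PathBuf>,
    net_config_path: Option<&PathBuf>,
) -> Secret {
    if let Some(hex) = node_key {
        // Highest precedence: the key given directly on the command line.
        Secret::Input(hex)
    } else if let Some(file) =
        node_key_file.or_else(|| net_config_path.map(|dir| dir.join(NODE_KEY_ED25519_FILE)))
    {
        // Otherwise an explicit key file, falling back to the chain-specific default file.
        Secret::File(file)
    } else {
        // No network config directory available: a new key is generated.
        Secret::New
    }
}

fn main() {
    let dir = PathBuf::from("/tmp/chains/dev/network");
    assert_eq!(
        resolve_node_key(None, None, Some(&dir)),
        Secret::File(dir.join(NODE_KEY_ED25519_FILE))
    );
    assert_eq!(
        resolve_node_key(Some("ab".repeat(32)), None, Some(&dir)),
        Secret::Input("ab".repeat(32))
    );
    assert_eq!(resolve_node_key(None, None, None), Secret::New);
}
```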
+ pub fn update_config<'a, G, E>( + &self, + mut config: &'a mut Configuration, + net_config_path: Option<&PathBuf>, + ) -> error::Result<&'a NodeKeyConfig> + where + G: RuntimeGenesis, + { + config.network.node_key = match self.node_key_type { + NodeKeyType::Ed25519 => { + let secret = if let Some(node_key) = self.node_key.as_ref() { + parse_ed25519_secret(node_key)? + } else { + let path = self.node_key_file.clone() + .or_else(|| net_config_path.map(|d| d.join(NODE_KEY_ED25519_FILE))); + + if let Some(path) = path { + sc_network::config::Secret::File(path) + } else { + sc_network::config::Secret::New + } + }; + + NodeKeyConfig::Ed25519(secret) + } + }; + + Ok(&config.network.node_key) + } +} + +/// Create an error caused by an invalid node key argument. +fn invalid_node_key(e: impl std::fmt::Display) -> error::Error { + error::Error::Input(format!("Invalid node key: {}", e)) +} + +/// Parse a Ed25519 secret key from a hex string into a `sc_network::Secret`. +fn parse_ed25519_secret(hex: &str) -> error::Result { + H256::from_str(&hex).map_err(invalid_node_key).and_then(|bytes| + sc_network::config::identity::ed25519::SecretKey::from_bytes(bytes) + .map(sc_network::config::Secret::Input) + .map_err(invalid_node_key)) +} + +#[cfg(test)] +mod tests { + use sc_network::config::identity::ed25519; + use super::*; + + #[test] + fn test_node_key_config_input() { + fn secret_input(net_config_dir: Option<&PathBuf>) -> error::Result<()> { + NodeKeyType::variants().iter().try_for_each(|t| { + let mut config = Configuration::<(), ()>::default(); + let node_key_type = NodeKeyType::from_str(t).unwrap(); + let sk = match node_key_type { + NodeKeyType::Ed25519 => ed25519::SecretKey::generate().as_ref().to_vec() + }; + let params = NodeKeyParams { + node_key_type, + node_key: Some(format!("{:x}", H256::from_slice(sk.as_ref()))), + node_key_file: None + }; + params.update_config(&mut config, net_config_dir).and_then(|c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) + if node_key_type == NodeKeyType::Ed25519 && + &sk[..] 
== ski.as_ref() => Ok(()), + _ => Err(error::Error::Input("Unexpected node key config".into())) + }) + }) + } + + assert!(secret_input(None).is_ok()); + assert!(secret_input(Some(&PathBuf::from_str("x").unwrap())).is_ok()); + } + + #[test] + fn test_node_key_config_file() { + fn secret_file(net_config_dir: Option<&PathBuf>) -> error::Result<()> { + NodeKeyType::variants().iter().try_for_each(|t| { + let mut config = Configuration::<(), ()>::default(); + let node_key_type = NodeKeyType::from_str(t).unwrap(); + let tmp = tempfile::Builder::new().prefix("alice").tempdir()?; + let file = tmp.path().join(format!("{}_mysecret", t)).to_path_buf(); + let params = NodeKeyParams { + node_key_type, + node_key: None, + node_key_file: Some(file.clone()) + }; + params.update_config(&mut config, net_config_dir).and_then(|c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) + if node_key_type == NodeKeyType::Ed25519 && f == &file => Ok(()), + _ => Err(error::Error::Input("Unexpected node key config".into())) + }) + }) + } + + assert!(secret_file(None).is_ok()); + assert!(secret_file(Some(&PathBuf::from_str("x").unwrap())).is_ok()); + } + + #[test] + fn test_node_key_config_default() { + fn with_def_params(f: F) -> error::Result<()> + where + F: Fn(NodeKeyParams) -> error::Result<()> + { + NodeKeyType::variants().iter().try_for_each(|t| { + let node_key_type = NodeKeyType::from_str(t).unwrap(); + f(NodeKeyParams { + node_key_type, + node_key: None, + node_key_file: None + }) + }) + } + + fn no_config_dir() -> error::Result<()> { + with_def_params(|params| { + let mut config = Configuration::<(), ()>::default(); + let typ = params.node_key_type; + params.update_config(&mut config, None) + .and_then(|c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::New) + if typ == NodeKeyType::Ed25519 => Ok(()), + _ => Err(error::Error::Input("Unexpected node key config".into())) + }) + }) + } + + fn some_config_dir(net_config_dir: &PathBuf) -> error::Result<()> { + with_def_params(|params| { + let mut config = Configuration::<(), ()>::default(); + let dir = PathBuf::from(net_config_dir.clone()); + let typ = params.node_key_type; + params.update_config(&mut config, Some(net_config_dir)) + .and_then(move |c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) + if typ == NodeKeyType::Ed25519 && + f == &dir.join(NODE_KEY_ED25519_FILE) => Ok(()), + _ => Err(error::Error::Input("Unexpected node key config".into())) + }) + }) + } + + assert!(no_config_dir().is_ok()); + assert!(some_config_dir(&PathBuf::from_str("x").unwrap()).is_ok()); + } +} diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs new file mode 100644 index 0000000000000000000000000000000000000000..ad64d757dcb61acb3fb6928caebfff9c8c778015 --- /dev/null +++ b/client/cli/src/params/pruning_params.rs @@ -0,0 +1,69 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. 
+ +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use structopt::StructOpt; +use sc_service::{Configuration, RuntimeGenesis, PruningMode}; + +use crate::error; + +/// Parameters to define the pruning mode +#[derive(Debug, StructOpt, Clone)] +pub struct PruningParams { + /// Specify the state pruning mode, a number of blocks to keep or 'archive'. + /// + /// Default is to keep all block states if the node is running as a + /// validator (i.e. 'archive'), otherwise state is only kept for the last + /// 256 blocks. + #[structopt(long = "pruning", value_name = "PRUNING_MODE")] + pub pruning: Option, +} + +impl PruningParams { + /// Put block pruning CLI params into `config` object. + pub fn update_config( + &self, + mut config: &mut Configuration, + role: sc_service::Roles, + unsafe_pruning: bool, + ) -> error::Result<()> + where + G: RuntimeGenesis, + { + // by default we disable pruning if the node is an authority (i.e. + // `ArchiveAll`), otherwise we keep state for the last 256 blocks. if the + // node is an authority and pruning is enabled explicitly, then we error + // unless `unsafe_pruning` is set. + config.pruning = match &self.pruning { + Some(ref s) if s == "archive" => PruningMode::ArchiveAll, + None if role == sc_service::Roles::AUTHORITY => PruningMode::ArchiveAll, + None => PruningMode::default(), + Some(s) => { + if role == sc_service::Roles::AUTHORITY && !unsafe_pruning { + return Err(error::Error::Input( + "Validators should run with state pruning disabled (i.e. archive). \ + You can ignore this check with `--unsafe-pruning`.".to_string() + )); + } + + PruningMode::keep_blocks(s.parse() + .map_err(|_| error::Error::Input("Invalid pruning mode specified".to_string()))? + ) + }, + }; + + Ok(()) + } +} diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs new file mode 100644 index 0000000000000000000000000000000000000000..03f44796460a28b138fdf9f3a91754a3b685c354 --- /dev/null +++ b/client/cli/src/params/shared_params.rs @@ -0,0 +1,116 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use std::path::PathBuf; +use structopt::StructOpt; +use app_dirs::{AppInfo, AppDataType}; +use sc_service::{ + Configuration, ChainSpecExtension, RuntimeGenesis, + config::DatabaseConfig, ChainSpec, +}; + +use crate::VersionInfo; +use crate::error; + +/// default sub directory to store database +const DEFAULT_DB_CONFIG_PATH : &'static str = "db"; + +/// Shared parameters used by all `CoreParams`. +#[derive(Debug, StructOpt, Clone)] +pub struct SharedParams { + /// Specify the chain specification (one of dev, local or staging). + #[structopt(long = "chain", value_name = "CHAIN_SPEC")] + pub chain: Option, + + /// Specify the development chain. + #[structopt(long = "dev")] + pub dev: bool, + + /// Specify custom base path. 
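Returning to `PruningParams::update_config` above, the mapping has three outcomes: `archive` (or an authority with no explicit flag) keeps all state, a plain number keeps that many recent block states, and an authority that enables pruning explicitly is refused unless `--unsafe-pruning` is passed. A std-only sketch of that mapping, with a stand-in enum for `PruningMode` and the 256-block default taken from the doc comment above:

```rust
/// Stand-in for sc_service's PruningMode.
#[derive(Debug, PartialEq)]
enum Pruning {
    ArchiveAll,
    KeepBlocks(u32),
}

fn resolve_pruning(flag: Option<&str>, is_authority: bool, unsafe_pruning: bool) -> Result<Pruning, String> {
    match flag {
        // Explicit archive mode keeps every block state.
        Some("archive") => Ok(Pruning::ArchiveAll),
        // Authorities default to archive; other nodes keep the last 256 block states.
        None if is_authority => Ok(Pruning::ArchiveAll),
        None => Ok(Pruning::KeepBlocks(256)),
        // An authority that explicitly prunes must opt in with --unsafe-pruning.
        Some(_) if is_authority && !unsafe_pruning => Err(
            "Validators should run with state pruning disabled (i.e. archive). \
             You can ignore this check with `--unsafe-pruning`.".to_string()
        ),
        Some(n) => n
            .parse()
            .map(Pruning::KeepBlocks)
            .map_err(|_| "Invalid pruning mode specified".to_string()),
    }
}

fn main() {
    assert_eq!(resolve_pruning(None, true, false), Ok(Pruning::ArchiveAll));
    assert_eq!(resolve_pruning(Some("1000"), false, false), Ok(Pruning::KeepBlocks(1000)));
    assert!(resolve_pruning(Some("1000"), true, false).is_err());
    assert_eq!(resolve_pruning(Some("1000"), true, true), Ok(Pruning::KeepBlocks(1000)));
}
```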
+ #[structopt(long = "base-path", short = "d", value_name = "PATH", parse(from_os_str))] + pub base_path: Option, + + /// Sets a custom logging filter. + #[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")] + pub log: Option, +} + +impl SharedParams { + /// Load spec to `Configuration` from `SharedParams` and spec factory. + pub fn update_config<'a, G, E, F>( + &self, + mut config: &'a mut Configuration, + spec_factory: F, + version: &VersionInfo, + ) -> error::Result<&'a ChainSpec> where + G: RuntimeGenesis, + E: ChainSpecExtension, + F: FnOnce(&str) -> Result>, String>, + { + let chain_key = match self.chain { + Some(ref chain) => chain.clone(), + None => if self.dev { "dev".into() } else { "".into() } + }; + let spec = match spec_factory(&chain_key)? { + Some(spec) => spec, + None => ChainSpec::from_json_file(PathBuf::from(chain_key))? + }; + + config.network.boot_nodes = spec.boot_nodes().to_vec(); + config.telemetry_endpoints = spec.telemetry_endpoints().clone(); + + config.chain_spec = Some(spec); + + if config.config_dir.is_none() { + config.config_dir = Some(base_path(self, version)); + } + + if config.database.is_none() { + config.database = Some(DatabaseConfig::Path { + path: config + .in_chain_config_dir(DEFAULT_DB_CONFIG_PATH) + .expect("We provided a base_path/config_dir."), + cache_size: None, + }); + } + + Ok(config.chain_spec.as_ref().unwrap()) + } + + /// Initialize substrate. This must be done only once. + /// + /// This method: + /// + /// 1. Set the panic handler + /// 2. Raise the FD limit + /// 3. Initialize the logger + pub fn init(&self, version: &VersionInfo) -> error::Result<()> { + crate::init(self.log.as_ref().map(|v| v.as_ref()).unwrap_or(""), version) + } +} + +fn base_path(cli: &SharedParams, version: &VersionInfo) -> PathBuf { + cli.base_path.clone() + .unwrap_or_else(|| + app_dirs::get_app_root( + AppDataType::UserData, + &AppInfo { + name: version.executable_name, + author: version.author + } + ).expect("app directories exist on all supported platforms; qed") + ) +} diff --git a/client/cli/src/params/transaction_pool_params.rs b/client/cli/src/params/transaction_pool_params.rs new file mode 100644 index 0000000000000000000000000000000000000000..80c591d1d2dcf2454d5aa059c0c7098744429b0c --- /dev/null +++ b/client/cli/src/params/transaction_pool_params.rs @@ -0,0 +1,49 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use structopt::StructOpt; +use sc_service::Configuration; +use crate::error; + +/// Parameters used to create the pool configuration. +#[derive(Debug, StructOpt, Clone)] +pub struct TransactionPoolParams { + /// Maximum number of transactions in the transaction pool. + #[structopt(long = "pool-limit", value_name = "COUNT", default_value = "8192")] + pub pool_limit: usize, + /// Maximum number of kilobytes of all transactions stored in the pool. 
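These two pool flags feed the `update_config` implementation that follows: `--pool-limit` and `--pool-kbytes` size the ready queue directly, and the future queue receives one tenth of each value. A small std-only sketch of that arithmetic (the struct below is an illustrative stand-in for the service's pool limits, not its actual type):

```rust
/// Stand-in for one sub-pool's limits: transaction count and total size in bytes.
#[derive(Debug, PartialEq)]
struct QueueLimits {
    count: usize,
    total_bytes: usize,
}

/// Mirrors the split performed in TransactionPoolParams::update_config: the ready
/// queue gets the full limits, the future queue a tenth of them.
fn pool_limits(pool_limit: usize, pool_kbytes: usize) -> (QueueLimits, QueueLimits) {
    let factor = 10;
    let ready = QueueLimits { count: pool_limit, total_bytes: pool_kbytes * 1024 };
    let future = QueueLimits { count: pool_limit / factor, total_bytes: pool_kbytes * 1024 / factor };
    (ready, future)
}

fn main() {
    // With the defaults above: 8192 transactions and 20480 KiB (20 MiB) of ready transactions.
    let (ready, future) = pool_limits(8192, 20480);
    assert_eq!(ready, QueueLimits { count: 8192, total_bytes: 20 * 1024 * 1024 });
    assert_eq!(future, QueueLimits { count: 819, total_bytes: 2 * 1024 * 1024 });
}
```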
+ #[structopt(long = "pool-kbytes", value_name = "COUNT", default_value = "20480")] + pub pool_kbytes: usize, +} + +impl TransactionPoolParams { + /// Fill the given `PoolConfiguration` by looking at the cli parameters. + pub fn update_config( + &self, + config: &mut Configuration, + ) -> error::Result<()> { + // ready queue + config.transaction_pool.ready.count = self.pool_limit; + config.transaction_pool.ready.total_bytes = self.pool_kbytes * 1024; + + // future queue + let factor = 10; + config.transaction_pool.future.count = self.pool_limit / factor; + config.transaction_pool.future.total_bytes = self.pool_kbytes * 1024 / factor; + + Ok(()) + } +} diff --git a/client/consensus/aura/Cargo.toml b/client/consensus/aura/Cargo.toml index e67f1e15a3e76f6d2cd2d9a6820e774583ad947b..be610c16d99b50f57563ea74d3b729770cca4f6d 100644 --- a/client/consensus/aura/Cargo.toml +++ b/client/consensus/aura/Cargo.toml @@ -1,44 +1,44 @@ [package] name = "sc-consensus-aura" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Aura consensus algorithm for substrate" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8", path = "../../../primitives/consensus/aura" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sc-client = { version = "0.8", path = "../../" } -sc-client-api = { version = "2.0.0", path = "../../api" } -codec = { package = "parity-scale-codec", version = "1.0.0" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/aura" } +sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } +sc-client = { version = "0.8.0-alpha.2", path = "../../" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } derive_more = "0.99.2" futures = "0.3.1" futures-timer = "3.0.1" -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sc-keystore = { version = "2.0.0", path = "../../keystore" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../../keystore" } log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-version = { version = "2.0.0", path = "../../../primitives/version" } -sc-consensus-slots = { version = "0.8", path = "../slots" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-io = { 
version = "2.0.0-alpha.2", path = "../../../primitives/io" } +sp-version = { version = "2.0.0-alpha.2", path = "../../../primitives/version" } +sc-consensus-slots = { version = "0.8.0-alpha.2", path = "../slots" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-timestamp = { version = "2.0.0-alpha.2", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../../telemetry" } [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8", path = "../../executor" } -sc-network = { version = "0.8", path = "../../network" } -sc-network-test = { version = "0.8.0", path = "../../network/test" } -sc-service = { version = "0.8", path = "../../service" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -tokio = "0.1.22" +sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../executor" } +sc-network = { version = "0.8.0-alpha.2", path = "../../network" } +sc-network-test = { version = "0.8.0-dev", path = "../../network/test" } +sc-service = { version = "0.8.0-alpha.2", path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } env_logger = "0.7.0" tempfile = "3.1.0" -futures01 = { package = "futures", version = "0.1" } diff --git a/client/consensus/aura/src/digest.rs b/client/consensus/aura/src/digests.rs similarity index 100% rename from client/consensus/aura/src/digest.rs rename to client/consensus/aura/src/digests.rs diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index db872f28c1b445db8d0c49baa81288477e03a712..5850228628547d061decc0263d7ef9f8ab30bd4a 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -79,33 +79,23 @@ pub use sp_consensus_aura::{ }, }; pub use sp_consensus::SyncOracle; -pub use digest::CompatibleDigestItem; +pub use digests::CompatibleDigestItem; -mod digest; +mod digests; type AuthorityId
<P> = <P as Pair>
::Public; -/// A slot duration. Create with `get_or_compute`. -#[derive(Clone, Copy, Debug, Encode, Decode, Hash, PartialOrd, Ord, PartialEq, Eq)] -pub struct SlotDuration(sc_consensus_slots::SlotDuration); - -impl SlotDuration { - /// Either fetch the slot duration from disk or compute it from the genesis - /// state. - pub fn get_or_compute(client: &C) -> CResult - where - A: Codec, - B: BlockT, - C: AuxStore + ProvideRuntimeApi, - C::Api: AuraApi, - { - sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| a.slot_duration(b)).map(Self) - } +/// Slot duration type for Aura. +pub type SlotDuration = sc_consensus_slots::SlotDuration; - /// Get the slot duration in milliseconds. - pub fn get(&self) -> u64 { - self.0.get() - } +/// Get type of `SlotDuration` for Aura. +pub fn slot_duration(client: &C) -> CResult where + A: Codec, + B: BlockT, + C: AuxStore + ProvideRuntimeApi, + C::Api: AuraApi, +{ + SlotDuration::get_or_compute(client, |a, b| a.slot_duration(b)) } /// Get slot author for given block along with authorities. @@ -179,10 +169,10 @@ pub fn start_aura( }; register_aura_inherent_data_provider( &inherent_data_providers, - slot_duration.0.slot_duration() + slot_duration.slot_duration() )?; Ok(sc_consensus_slots::start_slot_worker::<_, _, _, _, _, AuraSlotCompatible, _>( - slot_duration.0, + slot_duration, select_chain, worker, sync_oracle, @@ -415,21 +405,17 @@ fn find_pre_digest(header: &B::Header) -> Result( +fn check_header( client: &C, slot_now: u64, mut header: B::Header, hash: B::Hash, authorities: &[AuthorityId
<P>
], - _transaction_pool: Option<&T>, ) -> Result)>, Error> where DigestItemFor: CompatibleDigestItem
<P>
, P::Signature: Decode, C: sc_client_api::backend::AuxStore, P::Public: Encode + Decode + PartialEq + Clone, - T: Send + Sync + 'static, { let seal = match header.digest_mut().pop() { Some(x) => x, @@ -479,14 +465,13 @@ fn check_header( } /// A verifier for Aura blocks. -pub struct AuraVerifier { +pub struct AuraVerifier { client: Arc, phantom: PhantomData
<P>
, inherent_data_providers: sp_inherents::InherentDataProviders, - transaction_pool: Option>, } -impl AuraVerifier +impl AuraVerifier where P: Send + Sync + 'static { fn check_inherents( @@ -541,7 +526,7 @@ impl AuraVerifier } #[forbid(deprecated)] -impl Verifier for AuraVerifier where +impl Verifier for AuraVerifier where C: ProvideRuntimeApi + Send + Sync + @@ -553,7 +538,6 @@ impl Verifier for AuraVerifier where P: Pair + Send + Sync + 'static, P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, P::Signature: Encode + Decode, - T: Send + Sync + 'static, { fn verify( &mut self, @@ -575,13 +559,12 @@ impl Verifier for AuraVerifier where // we add one to allow for some small drift. // FIXME #1019 in the future, alter this queue to allow deferring of // headers - let checked_header = check_header::( + let checked_header = check_header::( &self.client, slot_now + 1, header, hash, &authorities[..], - self.transaction_pool.as_ref().map(|x| &**x), ).map_err(|e| e.to_string())?; match checked_header { CheckedHeader::Checked(pre_header, (slot_num, seal)) => { @@ -805,14 +788,13 @@ impl BlockImport for AuraBlockImport( +pub fn import_queue( slot_duration: SlotDuration, block_import: I, justification_import: Option>, finality_proof_import: Option>, client: Arc, inherent_data_providers: InherentDataProviders, - transaction_pool: Option>, ) -> Result>, sp_consensus::Error> where B: BlockT, C::Api: BlockBuilderApi + AuraApi> + ApiExt, @@ -822,7 +804,6 @@ pub fn import_queue( P: Pair + Send + Sync + 'static, P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, P::Signature: Encode + Decode, - T: Send + Sync + 'static, { register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; initialize_authorities_cache(&*client)?; @@ -831,7 +812,6 @@ pub fn import_queue( client: client.clone(), inherent_data_providers, phantom: PhantomData, - transaction_pool, }; Ok(BasicQueue::new( verifier, @@ -849,10 +829,10 @@ mod tests { use sp_runtime::traits::{Block as BlockT, DigestFor}; use sc_network::config::ProtocolConfig; use parking_lot::Mutex; - use tokio::runtime::current_thread; use sp_keyring::sr25519::Keyring; use sc_client::BlockchainEvents; use sp_consensus_aura::sr25519::AuthorityPair; + use std::task::Poll; type Error = sp_blockchain::Error; @@ -906,12 +886,11 @@ mod tests { const SLOT_DURATION: u64 = 1000; pub struct AuraTestNet { - peers: Vec>, + peers: Vec>, } impl TestNetFactory for AuraTestNet { - type Specialization = DummySpecialization; - type Verifier = AuraVerifier; + type Verifier = AuraVerifier; type PeerData = (); /// Create new test network with peers and given config. 
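For downstream code, the practical effect of the aura changes above is a small call-site migration: the local `SlotDuration` newtype is replaced by a re-export of `sc_consensus_slots::SlotDuration` plus a free `slot_duration` function, and the verifier and import queue no longer take a transaction pool. A sketch of the new call pattern, taken from the adjusted test code in this patch (fragment only; `client` is assumed to be a full client satisfying the bounds shown above):

```rust
// Before this change the slot duration came from the wrapper type:
//     let slot_duration = SlotDuration::get_or_compute(&*client).expect("slot duration available");
// Now the free function returns the same value directly:
let slot_duration = slot_duration(&*client).expect("slot duration available");

// Registering the inherent data provider is unchanged apart from the wrapper going away.
let inherent_data_providers = InherentDataProviders::new();
register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())
    .expect("Registers aura inherent data provider");
```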
@@ -926,8 +905,7 @@ mod tests { { match client { PeersClient::Full(client, _) => { - let slot_duration = SlotDuration::get_or_compute(&*client) - .expect("slot duration available"); + let slot_duration = slot_duration(&*client).expect("slot duration available"); let inherent_data_providers = InherentDataProviders::new(); register_aura_inherent_data_provider( &inherent_data_providers, @@ -938,7 +916,6 @@ mod tests { AuraVerifier { client, inherent_data_providers, - transaction_pool: Default::default(), phantom: Default::default(), } }, @@ -946,15 +923,15 @@ mod tests { } } - fn peer(&mut self, i: usize) -> &mut Peer { + fn peer(&mut self, i: usize) -> &mut Peer { &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec> { &self.peers } - fn mut_peers>)>(&mut self, closure: F) { + fn mut_peers>)>(&mut self, closure: F) { closure(&mut self.peers); } } @@ -973,8 +950,8 @@ mod tests { let net = Arc::new(Mutex::new(net)); let mut import_notifications = Vec::new(); + let mut aura_futures = Vec::new(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut keystore_paths = Vec::new(); for (peer_id, key) in peers { let mut net = net.lock(); @@ -995,15 +972,14 @@ mod tests { .for_each(move |_| future::ready(())) ); - let slot_duration = SlotDuration::get_or_compute(&*client) - .expect("slot duration available"); + let slot_duration = slot_duration(&*client).expect("slot duration available"); let inherent_data_providers = InherentDataProviders::new(); register_aura_inherent_data_provider( &inherent_data_providers, slot_duration.get() ).expect("Registers aura inherent data provider"); - let aura = start_aura::<_, _, _, _, _, AuthorityPair, _, _, _>( + aura_futures.push(start_aura::<_, _, _, _, _, AuthorityPair, _, _, _>( slot_duration, client.clone(), select_chain, @@ -1014,21 +990,19 @@ mod tests { false, keystore, sp_consensus::AlwaysCanAuthor, - ) - .expect("Starts aura") - .unit_error() - .compat(); - - runtime.spawn(aura); + ).expect("Starts aura")); } - runtime.spawn(futures01::future::poll_fn(move || { - net.lock().poll(); - Ok::<_, ()>(futures01::Async::NotReady::<()>) - })); - - runtime.block_on(future::join_all(import_notifications) - .unit_error().compat()).unwrap(); + futures::executor::block_on(future::select( + future::poll_fn(move |cx| { + net.lock().poll(cx); + Poll::<()>::Pending + }), + future::select( + future::join_all(aura_futures), + future::join_all(import_notifications) + ) + )); } #[test] diff --git a/client/consensus/babe/Cargo.toml b/client/consensus/babe/Cargo.toml index 3f5487885fea0e11ba758d421589eeda6a007500..66455adbdf8284a822a98a4bf4a2314964445fe5 100644 --- a/client/consensus/babe/Cargo.toml +++ b/client/consensus/babe/Cargo.toml @@ -1,37 +1,41 @@ [package] name = "sc-consensus-babe" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "BABE consensus algorithm for substrate" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-consensus-babe" + [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } -sp-consensus-babe = { version = "0.8", path = "../../../primitives/consensus/babe" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-application-crypto = { version = "2.0.0", path = "../../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } 
+sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/babe" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../../primitives/application-crypto" } num-bigint = "0.2.3" num-rational = "0.2.2" num-traits = "0.2.8" serde = { version = "1.0.104", features = ["derive"] } -sp-version = { version = "2.0.0", path = "../../../primitives/version" } -sp-io = { version = "2.0.0", path = "../../../primitives/io" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } -sc-keystore = { version = "2.0.0", path = "../../keystore" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sc-client = { version = "0.8", path = "../../" } -sc-consensus-epochs = { version = "0.8", path = "../epochs" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sc-consensus-uncles = { version = "0.8", path = "../uncles" } -sc-consensus-slots = { version = "0.8", path = "../slots" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } +sp-version = { version = "2.0.0-alpha.2", path = "../../../primitives/version" } +sp-io = { version = "2.0.0-alpha.2", path = "../../../primitives/io" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } +sp-timestamp = { version = "2.0.0-alpha.2", path = "../../../primitives/timestamp" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../../telemetry" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../../keystore" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } +sc-client = { version = "0.8.0-alpha.2", path = "../../" } +sc-consensus-epochs = { version = "0.8.0-alpha.2", path = "../epochs" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sc-consensus-uncles = { version = "0.8.0-alpha.2", path = "../uncles" } +sc-consensus-slots = { version = "0.8.0-alpha.2", path = "../slots" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +fork-tree = { version = "2.0.0-alpha.2", path = "../../../utils/fork-tree" } futures = "0.3.1" futures-timer = "3.0.1" parking_lot = "0.10.0" @@ -43,17 +47,15 @@ pdqselect = "0.1.0" derive_more = "0.99.2" [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../../primitives/keyring" } -sc-executor = { version = "0.8", path = "../../executor" } -sc-network = { version = "0.8", path = "../../network" } -sc-network-test = { version = "0.8.0", path = "../../network/test" } -sc-service = { version = "0.8", path = "../../service" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sc-block-builder = { version = "0.8", path = "../../block-builder" } -tokio = "0.1.22" 
+sp-keyring = { version = "2.0.0-alpha.2", path = "../../../primitives/keyring" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../executor" } +sc-network = { version = "0.8.0-alpha.2", path = "../../network" } +sc-network-test = { version = "0.8.0-dev", path = "../../network/test" } +sc-service = { version = "0.8.0-alpha.2", path = "../../service" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } +sc-block-builder = { version = "0.8.0-alpha.2", path = "../../block-builder" } env_logger = "0.7.0" tempfile = "3.1.0" -futures01 = { package = "futures", version = "0.1" } [features] test-helpers = [] diff --git a/client/consensus/babe/rpc/Cargo.toml b/client/consensus/babe/rpc/Cargo.toml index 3fd0e924af092231d1103471bec8231f40acb872..3f2cc4f0bcbce8913ca9183815edd5cffe39928f 100644 --- a/client/consensus/babe/rpc/Cargo.toml +++ b/client/consensus/babe/rpc/Cargo.toml @@ -1,30 +1,32 @@ [package] name = "sc-consensus-babe-rpc" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "RPC extensions for the BABE consensus algorithm" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sc-consensus-babe = { version = "0.8.0", path = "../" } +sc-consensus-babe = { version = "0.8.0-alpha.2", path = "../" } jsonrpc-core = "14.0.3" jsonrpc-core-client = "14.0.3" jsonrpc-derive = "14.0.3" -sp-consensus-babe = { version = "0.8", path = "../../../../primitives/consensus/babe" } +sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../../primitives/consensus/babe" } serde = { version = "1.0.104", features=["derive"] } -sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../../primitives/runtime" } -sc-consensus-epochs = { version = "0.8", path = "../../epochs" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../../primitives/runtime" } +sc-consensus-epochs = { version = "0.8.0-alpha.2", path = "../../epochs" } futures = "0.3.1" derive_more = "0.99.2" -sp-api = { version = "2.0.0", path = "../../../../primitives/api" } -sp-consensus = { version = "0.8", path = "../../../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../../../primitives/core" } -sc-keystore = { version = "2.0.0", path = "../../../keystore" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../../primitives/api" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../../primitives/consensus/common" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../../primitives/core" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../../../keystore" } [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } -sp-application-crypto = { version = "2.0.0", path = "../../../../primitives/application-crypto" } -sp-keyring = { version = "2.0.0", path = "../../../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../../test-utils/runtime/client" } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../../../primitives/application-crypto" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../../../primitives/keyring" } tempfile = "3.1.0" diff --git a/client/consensus/babe/rpc/src/lib.rs 
b/client/consensus/babe/rpc/src/lib.rs index 033a7d6b98530aa8f55ad4b224931d13188a1cd6..1ea7e423dc7a90c7b975cc0788284647106bfabf 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -230,7 +230,6 @@ mod tests { config.clone(), client.clone(), client.clone(), - client.clone(), ).expect("can initialize block-import"); let epoch_changes = link.epoch_changes().clone(); diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 4654f91b89876a9858751e8771bbb3fd0a69b022..a01ea63bbe111ef193d65de6a4264d7eb8a5ede4 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -86,16 +86,15 @@ pub(super) fn secondary_slot_author( Some(&expected_author.0) } -#[allow(deprecated)] pub(super) fn make_transcript( randomness: &[u8], slot_number: u64, epoch: u64, ) -> Transcript { let mut transcript = Transcript::new(&BABE_ENGINE_ID); - transcript.commit_bytes(b"slot number", &slot_number.to_le_bytes()); - transcript.commit_bytes(b"current epoch", &epoch.to_le_bytes()); - transcript.commit_bytes(b"chain randomness", randomness); + transcript.append_u64(b"slot number", slot_number); + transcript.append_u64(b"current epoch", epoch); + transcript.append_message(b"chain randomness", randomness); transcript } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index b93619b29c885216fbf39ff2cffc694889dc18b3..967a78e7bfc882debba29d27f1b8187338a0fea1 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -93,12 +93,9 @@ use sp_consensus_babe::inherents::BabeInherentData; use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; use sp_consensus::import_queue::{Verifier, BasicQueue, CacheKeyId}; use sc_client_api::{ - backend::{AuxStore, Backend}, - call_executor::CallExecutor, + backend::AuxStore, BlockchainEvents, ProvideUncles, }; -use sc_client::Client; - use sp_block_builder::BlockBuilder as BlockBuilderApi; use futures::prelude::*; @@ -321,7 +318,7 @@ pub struct BabeParams { pub can_author_with: CAW, } -/// Start the babe worker. The returned future should be run in a tokio runtime. +/// Start the babe worker. pub fn start_babe(BabeParams { keystore, client, @@ -655,27 +652,28 @@ impl BabeLink { } /// A verifier for Babe blocks. 
-pub struct BabeVerifier { - client: Arc>, - api: Arc, +pub struct BabeVerifier { + client: Arc, inherent_data_providers: sp_inherents::InherentDataProviders, config: Config, epoch_changes: SharedEpochChanges, time_source: TimeSource, } -impl BabeVerifier { +impl BabeVerifier + where + Block: BlockT, + Client: HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + Client::Api: BlockBuilderApi, +{ fn check_inherents( &self, block: Block, block_id: BlockId, inherent_data: InherentData, ) -> Result<(), Error> - where - PRA: ProvideRuntimeApi, - PRA::Api: BlockBuilderApi { - let inherent_res = self.api.runtime_api().check_inherents( + let inherent_res = self.client.runtime_api().check_inherents( &block_id, block, inherent_data, @@ -693,57 +691,11 @@ impl BabeVerifier { } } -#[allow(dead_code)] -fn median_algorithm( - median_required_blocks: u64, - slot_duration: u64, - slot_number: u64, - slot_now: u64, - time_source: &mut (Option, Vec<(Instant, u64)>), -) { - let num_timestamps = time_source.1.len(); - if num_timestamps as u64 >= median_required_blocks && median_required_blocks > 0 { - let mut new_list: Vec<_> = time_source.1.iter().map(|&(t, sl)| { - let offset: u128 = u128::from(slot_duration) - .checked_mul(1_000_000u128) // self.config.slot_duration returns milliseconds - .and_then(|x| { - x.checked_mul(u128::from(slot_number).saturating_sub(u128::from(sl))) - }) - .expect("we cannot have timespans long enough for this to overflow; qed"); - - const NANOS_PER_SEC: u32 = 1_000_000_000; - let nanos = (offset % u128::from(NANOS_PER_SEC)) as u32; - let secs = (offset / u128::from(NANOS_PER_SEC)) as u64; - - t + Duration::new(secs, nanos) - }).collect(); - - // Use a partial sort to move the median timestamp to the middle of the list - pdqselect::select(&mut new_list, num_timestamps / 2); - - let &median = new_list - .get(num_timestamps / 2) - .expect("we have at least one timestamp, so this is a valid index; qed"); - - let now = Instant::now(); - if now >= median { - time_source.0.replace(now - median); - } - - time_source.1.clear(); - } else { - time_source.1.push((Instant::now(), slot_now)) - } -} - -impl Verifier for BabeVerifier where +impl Verifier for BabeVerifier where Block: BlockT, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - RA: Send + Sync, - PRA: ProvideRuntimeApi + Send + Sync + AuxStore + ProvideCache, - PRA::Api: BlockBuilderApi - + BabeApi, + Client: HeaderMetadata + HeaderBackend + ProvideRuntimeApi + + Send + Sync + AuxStore + ProvideCache, + Client::Api: BlockBuilderApi + BabeApi, { fn verify( &mut self, @@ -812,7 +764,7 @@ impl Verifier for BabeVerifier { +pub struct BabeBlockImport { inner: I, - client: Arc>, - api: Arc, + client: Arc, epoch_changes: SharedEpochChanges, config: Config, } -impl Clone for BabeBlockImport { +impl Clone for BabeBlockImport { fn clone(&self) -> Self { BabeBlockImport { inner: self.inner.clone(), client: self.client.clone(), - api: self.api.clone(), epoch_changes: self.epoch_changes.clone(), config: self.config.clone(), } } } -impl BabeBlockImport { +impl BabeBlockImport { fn new( - client: Arc>, - api: Arc, + client: Arc, epoch_changes: SharedEpochChanges, block_import: I, config: Config, ) -> Self { BabeBlockImport { client, - api, inner: block_import, epoch_changes, config, @@ -938,19 +886,16 @@ impl BabeBlockImport { } } -impl BlockImport for BabeBlockImport where +impl BlockImport for BabeBlockImport where Block: BlockT, - I: BlockImport> + Send + Sync, - I::Error: Into, - B: Backend + 'static, - E: 
CallExecutor + 'static + Clone + Send + Sync, - Client: AuxStore, - RA: Send + Sync, - PRA: ProvideRuntimeApi + ProvideCache, - PRA::Api: BabeApi + ApiExt, + Inner: BlockImport> + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + HeaderMetadata + + AuxStore + ProvideRuntimeApi + ProvideCache + Send + Sync, + Client::Api: BabeApi + ApiExt, { type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; + type Transaction = sp_api::TransactionFor; fn import_block( &mut self, @@ -974,7 +919,7 @@ impl BlockImport for BabeBlockImport::ParentUnavailable(parent_hash, hash) @@ -1046,7 +991,7 @@ impl BlockImport for BabeBlockImport BlockImport for BabeBlockImport BlockImport for BabeBlockImport( - client: &Client, +fn prune_finalized( + client: Arc, epoch_changes: &mut EpochChangesFor, ) -> Result<(), ConsensusError> where Block: BlockT, - E: CallExecutor + Send + Sync, - B: Backend, - RA: Send + Sync, + Client: HeaderBackend + HeaderMetadata, { - let info = client.chain_info(); + let info = client.info(); let finalized_slot = { - let finalized_header = client.header(&BlockId::Hash(info.finalized_hash)) + let finalized_header = client.header(BlockId::Hash(info.finalized_hash)) .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))? .expect("best finalized hash was given by client; \ finalized headers must exist in db; qed"); @@ -1190,16 +1133,12 @@ fn prune_finalized( /// /// Also returns a link object used to correctly instantiate the import queue /// and background worker. -pub fn block_import( +pub fn block_import( config: Config, wrapped_block_import: I, - client: Arc>, - api: Arc, -) -> ClientResult<(BabeBlockImport, BabeLink)> where - B: Backend, - E: CallExecutor + Send + Sync, - RA: Send + Sync, - Client: AuxStore, + client: Arc, +) -> ClientResult<(BabeBlockImport, BabeLink)> where + Client: AuxStore + HeaderBackend + HeaderMetadata, { let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; let link = BabeLink { @@ -1212,13 +1151,12 @@ pub fn block_import( // epoch tree it is useful as a migration, so that nodes prune long trees on // startup rather than waiting until importing the next epoch change block. prune_finalized( - &client, + client.clone(), &mut epoch_changes.lock(), )?; let import = BabeBlockImport::new( client, - api, epoch_changes, wrapped_block_import, config, @@ -1236,28 +1174,24 @@ pub fn block_import( /// /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. 
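The rewritten bounds above reflect the PR's main theme for BABE: instead of the concrete sc_client::Client plus a separate runtime-api handle, the verifier, block import and prune_finalized are now generic over any Client type that provides the blockchain traits they actually use (HeaderBackend, HeaderMetadata, ProvideRuntimeApi, AuxStore). A toy sketch of that bound style, using a stand-in trait rather than the real sp_blockchain ones:

use std::sync::Arc;

// Stand-in for sp_blockchain::HeaderBackend; illustrative only.
trait HeaderBackend {
    fn finalized_number(&self) -> u64;
}

// Generic over capabilities rather than a concrete client type, mirroring the
// new prune_finalized<Block, Client>(client: Arc<Client>, ...) shape.
fn prune_below_finalized<C: HeaderBackend>(client: Arc<C>) -> u64 {
    client.finalized_number()
}

struct DummyClient;

impl HeaderBackend for DummyClient {
    fn finalized_number(&self) -> u64 {
        42
    }
}

fn main() {
    assert_eq!(prune_below_finalized(Arc::new(DummyClient)), 42);
}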
-pub fn import_queue( +pub fn import_queue( babe_link: BabeLink, - block_import: I, + block_import: Inner, justification_import: Option>, finality_proof_import: Option>, - client: Arc>, - api: Arc, + client: Arc, inherent_data_providers: InherentDataProviders, -) -> ClientResult>> where - B: Backend + 'static, - I: BlockImport> +) -> ClientResult>> where + Inner: BlockImport> + Send + Sync + 'static, - E: CallExecutor + Clone + Send + Sync + 'static, - RA: Send + Sync + 'static, - PRA: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, - PRA::Api: BlockBuilderApi + BabeApi + ApiExt, + Client: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, + Client: HeaderBackend + HeaderMetadata, + Client::Api: BlockBuilderApi + BabeApi + ApiExt, { register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; let verifier = BabeVerifier { client: client.clone(), - api, inherent_data_providers, config: babe_link.config, epoch_changes: babe_link.epoch_changes, diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 6c1ffa2c3af05109701a361a5f463cf4b3b71d50..a5493918f001de75f828eaa573cbafb9c2fcb8f6 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -23,7 +23,7 @@ use super::*; use authorship::claim_slot; use sp_consensus_babe::{AuthorityPair, SlotNumber}; -use sc_block_builder::BlockBuilder; +use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sp_consensus::{ NoNetwork as DummyOracle, Proposal, RecordProof, import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, @@ -32,10 +32,9 @@ use sc_network_test::*; use sc_network_test::{Block as TestBlock, PeersClient}; use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; -use tokio::runtime::current_thread; use sc_client_api::{BlockchainEvents, backend::TransactionFor}; use log::debug; -use std::{time::Duration, cell::RefCell}; +use std::{time::Duration, cell::RefCell, task::Poll}; type Item = DigestItem; @@ -199,20 +198,14 @@ impl> BlockImport for PanickingBlockImport< } pub struct BabeTestNet { - peers: Vec, DummySpecialization>>, + peers: Vec>>, } type TestHeader = ::Header; type TestExtrinsic = ::Extrinsic; pub struct TestVerifier { - inner: BabeVerifier< - substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, - TestBlock, - substrate_test_runtime_client::runtime::RuntimeApi, - PeersFullClient, - >, + inner: BabeVerifier, mutator: Mutator, } @@ -242,7 +235,6 @@ pub struct PeerData { } impl TestNetFactory for BabeTestNet { - type Specialization = DummySpecialization; type Verifier = TestVerifier; type PeerData = Option; @@ -271,7 +263,6 @@ impl TestNetFactory for BabeTestNet { config, client.clone(), client.clone(), - client.clone(), ).expect("can initialize block-import"); let block_import = PanickingBlockImport(block_import); @@ -305,7 +296,6 @@ impl TestNetFactory for BabeTestNet { TestVerifier { inner: BabeVerifier { client: client.clone(), - api: client, inherent_data_providers: data.inherent_data_providers.clone(), config: data.link.config.clone(), epoch_changes: data.link.epoch_changes.clone(), @@ -315,17 +305,17 @@ impl TestNetFactory for BabeTestNet { } } - fn peer(&mut self, i: usize) -> &mut Peer { + fn peer(&mut self, i: usize) -> &mut Peer { trace!(target: "babe", "Retrieving a peer"); &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn 
peers(&self) -> &Vec> { trace!(target: "babe", "Retrieving peers"); &self.peers } - fn mut_peers>)>( + fn mut_peers>)>( &mut self, closure: F, ) { @@ -363,7 +353,7 @@ fn run_one_test( let net = Arc::new(Mutex::new(net)); let mut import_notifications = Vec::new(); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut babe_futures = Vec::new(); let mut keystore_paths = Vec::new(); for (peer_id, seed) in peers { @@ -408,7 +398,7 @@ fn run_one_test( ); - runtime.spawn(start_babe(BabeParams { + babe_futures.push(start_babe(BabeParams { block_import: data.block_import.lock().take().expect("import set up during init"), select_chain, client, @@ -419,23 +409,23 @@ fn run_one_test( babe_link: data.link.clone(), keystore, can_author_with: sp_consensus::AlwaysCanAuthor, - }).expect("Starts babe").unit_error().compat()); + }).expect("Starts babe")); } - runtime.spawn(futures01::future::poll_fn(move || { - let mut net = net.lock(); - net.poll(); - for p in net.peers() { - for (h, e) in p.failed_verifications() { - panic!("Verification failed for {:?}: {}", h, e); + futures::executor::block_on(future::select( + futures::future::poll_fn(move |cx| { + let mut net = net.lock(); + net.poll(cx); + for p in net.peers() { + for (h, e) in p.failed_verifications() { + panic!("Verification failed for {:?}: {}", h, e); + } } - } - - Ok::<_, ()>(futures01::Async::NotReady::<()>) - })); - - runtime.block_on(future::join_all(import_notifications) - .unit_error().compat()).unwrap(); + + Poll::<()>::Pending + }), + future::select(future::join_all(import_notifications), future::join_all(babe_futures)) + )); } #[test] diff --git a/client/consensus/epochs/Cargo.toml b/client/consensus/epochs/Cargo.toml index e08553a241d1c41e7c8d175bcaedd3fc03961610..fe2365429892121a4092a1f63021725af6cf5028 100644 --- a/client/consensus/epochs/Cargo.toml +++ b/client/consensus/epochs/Cargo.toml @@ -1,14 +1,17 @@ [package] name = "sc-consensus-epochs" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Generic epochs-based utilities for consensus" edition = "2018" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } parking_lot = "0.10.0" -fork-tree = { version = "2.0.0", path = "../../../utils/fork-tree" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sc-client-api = { path = "../../api" } +fork-tree = { version = "2.0.0-alpha.2", path = "../../../utils/fork-tree" } +sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-alpha.2"} +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sc-client-api = { path = "../../api" , version = "2.0.0-alpha.2"} diff --git a/client/consensus/manual-seal/Cargo.toml b/client/consensus/manual-seal/Cargo.toml index 71c1f6e5583a27edb96b6d7710a31112ce6f169d..11fee0e3f9e608c73e716f5ac8a84480140e31d1 100644 --- a/client/consensus/manual-seal/Cargo.toml +++ b/client/consensus/manual-seal/Cargo.toml @@ -1,10 +1,12 @@ [package] name = "sc-consensus-manual-seal" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Manual sealing engine for Substrate" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" 
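The run_one_test rewrite above drops the tokio current_thread runtime and drives everything with the futures 0.3 executor: a poll_fn future keeps polling the test network while select races it against the joined import notifications and BABE worker futures. A minimal sketch of that pattern (toy futures, not the real test types):

use std::task::Poll;
use futures::{executor::block_on, future::{self, poll_fn}};

fn main() {
    // Driver future: never completes on its own. In the real test this polls
    // the network and panics on failed block verifications.
    let driver = poll_fn(|_cx| Poll::<()>::Pending);

    // Work futures: stand-ins for the import notifications and BABE workers.
    let work = future::join_all(vec![future::ready(1u32), future::ready(2)]);

    // Runs until the work side finishes; the pending driver is polled alongside,
    // replacing the old tokio current_thread runtime.
    block_on(future::select(driver, work));
}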
+repository = "https://github.com/paritytech/substrate/" [dependencies] derive_more = "0.99.2" @@ -16,19 +18,19 @@ log = "0.4.8" parking_lot = "0.10.0" serde = { version = "1.0", features=["derive"] } -sc-client = { path = "../../../client" } -sc-client-api = { path = "../../../client/api" } -sc-transaction-pool = { path = "../../transaction-pool" } -sp-blockchain = { path = "../../../primitives/blockchain" } -sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common" } -sp-inherents = { path = "../../../primitives/inherents" } -sp-runtime = { path = "../../../primitives/runtime" } -sp-transaction-pool = { path = "../../../primitives/transaction-pool" } +sc-client = { path = "../../../client" , version = "0.8.0-alpha.2"} +sc-client-api = { path = "../../../client/api" , version = "2.0.0-alpha.2"} +sc-transaction-pool = { path = "../../transaction-pool" , version = "2.0.0-alpha.2"} +sp-blockchain = { path = "../../../primitives/blockchain" , version = "2.0.0-alpha.2"} +sp-consensus = { package = "sp-consensus", path = "../../../primitives/consensus/common" , version = "0.8.0-alpha.2"} +sp-inherents = { path = "../../../primitives/inherents" , version = "2.0.0-alpha.2"} +sp-runtime = { path = "../../../primitives/runtime" , version = "2.0.0-alpha.2"} +sp-transaction-pool = { path = "../../../primitives/transaction-pool" , version = "2.0.0-alpha.2"} [dev-dependencies] -sc-basic-authorship = { path = "../../basic-authorship" } -substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" } -substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool" } +sc-basic-authorship = { path = "../../basic-authorship" , version = "0.8.0-alpha.2"} +substrate-test-runtime-client = { path = "../../../test-utils/runtime/client" , version = "2.0.0-dev"} +substrate-test-runtime-transaction-pool = { path = "../../../test-utils/runtime/transaction-pool" , version = "2.0.0-dev"} tokio = { version = "0.2", features = ["rt-core", "macros"] } env_logger = "0.7.0" tempfile = "3.1.0" diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index c4336485a14acbe3ac1c6d7b9493a80bd577e5a8..18dc91ad34d625fd6b7ce661bea7a56aeebdcc38 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -244,10 +244,10 @@ mod tests { let select_chain = LongestChain::new(backend.clone()); let inherent_data_providers = InherentDataProviders::new(); let pool = Arc::new(BasicPool::new(Options::default(), api()).0); - let env = ProposerFactory { - transaction_pool: pool.clone(), - client: client.clone(), - }; + let env = ProposerFactory::new( + client.clone(), + pool.clone() + ); // this test checks that blocks are created as soon as transactions are imported into the pool. let (sender, receiver) = futures::channel::oneshot::channel(); let mut sender = Arc::new(Some(sender)); @@ -309,10 +309,10 @@ mod tests { let select_chain = LongestChain::new(backend.clone()); let inherent_data_providers = InherentDataProviders::new(); let pool = Arc::new(BasicPool::new(Options::default(), api()).0); - let env = ProposerFactory { - transaction_pool: pool.clone(), - client: client.clone(), - }; + let env = ProposerFactory::new( + client.clone(), + pool.clone() + ); // this test checks that blocks are created as soon as an engine command is sent over the stream. 
let (mut sink, stream) = futures::channel::mpsc::channel(1024); let future = run_manual_seal( @@ -378,10 +378,10 @@ mod tests { let inherent_data_providers = InherentDataProviders::new(); let pool_api = api(); let pool = Arc::new(BasicPool::new(Options::default(), pool_api.clone()).0); - let env = ProposerFactory { - transaction_pool: pool.clone(), - client: client.clone(), - }; + let env = ProposerFactory::new( + client.clone(), + pool.clone(), + ); // this test checks that blocks are created as soon as an engine command is sent over the stream. let (mut sink, stream) = futures::channel::mpsc::channel(1024); let future = run_manual_seal( diff --git a/client/consensus/pow/Cargo.toml b/client/consensus/pow/Cargo.toml index b31d9406e1e852101016f1af51a9ef9c9cafaaf4..f95d196b62f7a94fec013416252456e50395b02f 100644 --- a/client/consensus/pow/Cargo.toml +++ b/client/consensus/pow/Cargo.toml @@ -1,23 +1,25 @@ [package] name = "sc-consensus-pow" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "PoW consensus algorithm for substrate" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-block-builder = { version = "2.0.0", path = "../../../primitives/block-builder" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } -sp-consensus-pow = { version = "0.8", path = "../../../primitives/consensus/pow" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } +sp-block-builder = { version = "2.0.0-alpha.2", path = "../../../primitives/block-builder" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } +sp-consensus-pow = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/pow" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } log = "0.4.8" futures = { version = "0.3.1", features = ["compat"] } -sp-timestamp = { version = "2.0.0", path = "../../../primitives/timestamp" } +sp-timestamp = { version = "2.0.0-alpha.2", path = "../../../primitives/timestamp" } derive_more = "0.99.2" diff --git a/client/consensus/slots/Cargo.toml b/client/consensus/slots/Cargo.toml index 67de0a54ed7b657418a7abb9710d0672e042733a..fe7958b257203c9bf24e0582515e249d3a34a74c 100644 --- a/client/consensus/slots/Cargo.toml +++ b/client/consensus/slots/Cargo.toml @@ -1,27 +1,29 @@ [package] name = "sc-consensus-slots" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Generic slots-based utilities for consensus" 
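The three manual-seal test hunks above switch from building ProposerFactory as a struct literal to the ProposerFactory::new(client, pool) constructor. A simplified stand-in showing the shape of that call (not the real sc-basic-authorship type):

struct ProposerFactory<C, P> {
    client: C,
    transaction_pool: P,
}

impl<C, P> ProposerFactory<C, P> {
    // Client first, pool second, matching the call sites in the diff.
    fn new(client: C, transaction_pool: P) -> Self {
        Self { client, transaction_pool }
    }
}

fn main() {
    let factory = ProposerFactory::new("client", "pool");
    // A constructor lets the real type keep its fields private, which is
    // presumably why the tests can no longer use a struct literal.
    println!("{} / {}", factory.client, factory.transaction_pool);
}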
edition = "2018" build = "build.rs" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sc-telemetry = { version = "2.0.0", path = "../../telemetry" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../../telemetry" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } futures = "0.3.1" futures-timer = "3.0.1" parking_lot = "0.10.0" log = "0.4.8" [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 892c8b136436b9fa226f2cbd4617c4729870dede..e23b2fb321a306243aa1bb24b7e580289f28045a 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -37,7 +37,7 @@ use futures_timer::Delay; use sp_inherents::{InherentData, InherentDataProviders}; use log::{debug, error, info, warn}; use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header, HasherFor, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; use sp_api::{ProvideRuntimeApi, ApiRef}; use std::{fmt::Debug, ops::Deref, pin::Pin, sync::Arc, time::{Instant, Duration}}; use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO}; @@ -47,7 +47,7 @@ use parking_lot::Mutex; /// /// See [`sp_state_machine::StorageChanges`] for more information. pub type StorageChanges = - sp_state_machine::StorageChanges, NumberFor>; + sp_state_machine::StorageChanges, NumberFor>; /// A worker that should be invoked at every new slot. 
pub trait SlotWorker { diff --git a/client/consensus/uncles/Cargo.toml b/client/consensus/uncles/Cargo.toml index f336564c4ec14db9fca99f0e50c2155d81dc35ac..ad325ed79f7a576bd957fd1a06fd5e473eb60394 100644 --- a/client/consensus/uncles/Cargo.toml +++ b/client/consensus/uncles/Cargo.toml @@ -1,16 +1,18 @@ [package] name = "sc-consensus-uncles" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Generic uncle inclusion utilities for consensus" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-authorship = { version = "2.0.0", path = "../../../primitives/authorship" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sp-inherents = { version = "2.0.0", path = "../../../primitives/inherents" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-authorship = { version = "2.0.0-alpha.2", path = "../../../primitives/authorship" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../../primitives/inherents" } log = "0.4.8" diff --git a/client/db/Cargo.toml b/client/db/Cargo.toml index ef418d43efc183facef019a1e9e57912c3951140..8eaae24e5207cd0cd1fa46b079775fd858073dd7 100644 --- a/client/db/Cargo.toml +++ b/client/db/Cargo.toml @@ -1,9 +1,12 @@ [package] name = "sc-client-db" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Client backend that uses RocksDB database as storage." 
[dependencies] parking_lot = "0.10.0" @@ -15,22 +18,23 @@ kvdb-memorydb = "0.4.0" linked-hash-map = "0.5.2" hash-db = "0.15.2" parity-util-mem = { version = "0.5.1", default-features = false, features = ["std"] } -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sc-client = { version = "0.8", path = "../" } -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } -sc-executor = { version = "0.8", path = "../executor" } -sc-state-db = { version = "0.8", path = "../state-db" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } -sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sc-client = { version = "0.8.0-alpha.2", path = "../" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } +sc-executor = { version = "0.8.0-alpha.2", path = "../executor" } +sc-state-db = { version = "0.8.0-alpha.2", path = "../state-db" } +sp-trie = { version = "2.0.0-alpha.2", path = "../../primitives/trie" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-alpha.2", path = "../../utils/prometheus" } [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } env_logger = "0.7.0" quickcheck = "0.9" kvdb-rocksdb = "0.5" diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index 9858a5c148bfaa13f34af57ce4091540e1b39928..b6a6b3f8d499762f8a413a084138e7c6fe600a31 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -24,14 +24,14 @@ use rand::Rng; use hash_db::{Prefix, Hasher}; use sp_trie::{MemoryDB, prefixed_key}; use sp_core::storage::ChildInfo; -use sp_runtime::traits::{Block as BlockT, HasherFor}; +use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; use sp_state_machine::{DBValue, backend::Backend as StateBackend}; use kvdb::{KeyValueDB, DBTransaction}; use kvdb_rocksdb::{Database, DatabaseConfig}; type DbState = sp_state_machine::TrieBackend< - Arc>>, HasherFor + Arc>>, HashFor >; struct StorageDb { @@ -39,9 +39,9 @@ struct StorageDb { _block: std::marker::PhantomData, } -impl sp_state_machine::Storage> for StorageDb { +impl sp_state_machine::Storage> for StorageDb { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); + let key = prefixed_key::>(key, prefix); self.db.get(0, &key) .map_err(|e| format!("Database backend error: {:?}", e)) } @@ -53,7 +53,7 @@ pub struct BenchmarkingState { root: Cell, 
state: RefCell>>, db: Cell>>, - genesis: as StateBackend>>::Transaction, + genesis: as StateBackend>>::Transaction, } impl BenchmarkingState { @@ -64,8 +64,8 @@ impl BenchmarkingState { let path = temp_dir.join(&name); let mut root = B::Hash::default(); - let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); + let mut mdb = MemoryDB::>::default(); + sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); std::fs::create_dir(&path).map_err(|_| String::from("Error creating temp dir"))?; let mut state = BenchmarkingState { @@ -108,8 +108,8 @@ impl BenchmarkingState { self.db.set(None); *self.state.borrow_mut() = None; let mut root = B::Hash::default(); - let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); + let mut mdb = MemoryDB::>::default(); + sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); self.root.set(root); std::fs::remove_dir_all(&self.path).map_err(|_| "Error removing database dir".into()) @@ -126,10 +126,10 @@ fn state_err() -> String { "State is not open".into() } -impl StateBackend> for BenchmarkingState { - type Error = as StateBackend>>::Error; - type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; +impl StateBackend> for BenchmarkingState { + type Error = as StateBackend>>::Error; + type Transaction = as StateBackend>>::Transaction; + type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key) @@ -244,12 +244,12 @@ impl StateBackend> for BenchmarkingState { } fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> + -> Option<&sp_state_machine::TrieBackend>> { None } - fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction) + fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction) -> Result<(), Self::Error> { if let Some(db) = self.db.take() { diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 99488bbaed0964d4509e38b94c37d68dbe2cfb24..a28cd604fe3633d952dfd10a94ea8df01c4bc975 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -28,7 +28,7 @@ use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache}; use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, HasherFor, NumberFor, One, Zero, CheckedSub, + Block as BlockT, Header as HeaderT, HashFor, NumberFor, One, Zero, CheckedSub, }; use sp_runtime::generic::{BlockId, DigestItem, ChangesTrieSignal}; use sp_state_machine::{DBValue, ChangesTrieBuildCache, ChangesTrieCacheAction}; @@ -150,7 +150,7 @@ impl DbChangesTrieStorage { pub fn commit( &self, tx: &mut DBTransaction, - mut changes_trie: MemoryDB>, + mut changes_trie: MemoryDB>, parent_block: ComplexBlockId, block: ComplexBlockId, new_header: &Block::Header, @@ -377,7 +377,7 @@ impl DbChangesTrieStorage { } impl PrunableStateChangesTrieStorage for DbChangesTrieStorage { - fn storage(&self) -> &dyn sp_state_machine::ChangesTrieStorage, NumberFor> { + fn storage(&self) -> &dyn sp_state_machine::ChangesTrieStorage, NumberFor> { self } @@ -396,7 +396,7 @@ impl PrunableStateChangesTrieStorage for DbChangesTrieStor } } -impl sp_state_machine::ChangesTrieRootsStorage, 
NumberFor> +impl sp_state_machine::ChangesTrieRootsStorage, NumberFor> for DbChangesTrieStorage { fn build_anchor( @@ -469,12 +469,12 @@ impl sp_state_machine::ChangesTrieRootsStorage, } } -impl sp_state_machine::ChangesTrieStorage, NumberFor> +impl sp_state_machine::ChangesTrieStorage, NumberFor> for DbChangesTrieStorage where Block: BlockT, { - fn as_roots_storage(&self) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { + fn as_roots_storage(&self) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { self } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 5173497509cf9ad909813d0baa17df72fe406979..746c73bea2804cfc189632f9a1ec78a7c03154a9 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -46,9 +46,11 @@ use std::path::PathBuf; use std::io; use std::collections::HashMap; -use sc_client_api::{execution_extensions::ExecutionExtensions, ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo}; -use sc_client_api::backend::NewBlockState; -use sc_client_api::backend::PrunableStateChangesTrieStorage; +use sc_client_api::{ + ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo, MemorySize, + execution_extensions::ExecutionExtensions, + backend::{NewBlockState, PrunableStateChangesTrieStorage}, +}; use sp_blockchain::{ Result as ClientResult, Error as ClientError, well_known_cache_keys, HeaderBackend, @@ -65,7 +67,7 @@ use sp_runtime::{ BuildStorage, }; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HasherFor, + Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, }; use sc_executor::RuntimeInfo; use sp_state_machine::{ @@ -78,10 +80,11 @@ use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTra use sc_client::leaves::{LeafSet, FinalizationDisplaced}; use sc_state_db::StateDb; use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; -use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; +use crate::storage_cache::{CachingState, SyncingCachingState, SharedCache, new_shared_cache}; use crate::stats::StateUsageStats; use log::{trace, debug, warn}; pub use sc_state_db::PruningMode; +use prometheus_endpoint::Registry; #[cfg(any(feature = "kvdb-rocksdb", test))] pub use bench::BenchmarkingState; @@ -97,7 +100,7 @@ const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. pub type DbState = sp_state_machine::TrieBackend< - Arc>>, HasherFor + Arc>>, HashFor >; /// Re-export the KVDB trait so that one can pass an implementation of it. 
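Much of the churn in the db and consensus crates here is the mechanical HasherFor -> HashFor rename. Both name the hashing type of a block's header; a toy illustration of what such an alias looks like (simplified stand-in traits, not the real sp_runtime definitions):

#![allow(dead_code)]

// Simplified stand-ins for sp_runtime's Header/Block traits.
trait Header {
    type Hashing;
}

trait Block {
    type Header: Header;
}

// The alias the diff renames everywhere: roughly
// <<B as Block>::Header as Header>::Hashing in sp_runtime.
type HashFor<B> = <<B as Block>::Header as Header>::Hashing;

fn main() {}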
@@ -137,10 +140,10 @@ impl std::fmt::Debug for RefTrackingState { } } -impl StateBackend> for RefTrackingState { - type Error = as StateBackend>>::Error; - type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; +impl StateBackend> for RefTrackingState { + type Error = as StateBackend>>::Error; + type Transaction = as StateBackend>>::Transaction; + type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; fn storage(&self, key: &[u8]) -> Result>, Self::Error> { self.state.storage(key) @@ -249,7 +252,7 @@ impl StateBackend> for RefTrackingState { } fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> + -> Option<&sp_state_machine::TrieBackend>> { self.state.as_trie_backend() } @@ -289,6 +292,7 @@ pub fn new_client( fork_blocks: ForkBlocks, bad_blocks: BadBlocks, execution_extensions: ExecutionExtensions, + prometheus_registry: Option, ) -> Result<( sc_client::Client< Backend, @@ -315,6 +319,7 @@ pub fn new_client( fork_blocks, bad_blocks, execution_extensions, + prometheus_registry, )?, backend, )) @@ -518,11 +523,11 @@ impl HeaderMetadata for BlockchainDb { /// Database transaction pub struct BlockImportOperation { - old_state: CachingState, Block>, - db_updates: PrefixedMemoryDB>, + old_state: SyncingCachingState, Block>, + db_updates: PrefixedMemoryDB>, storage_updates: StorageCollection, child_storage_updates: ChildStorageCollection, - changes_trie_updates: MemoryDB>, + changes_trie_updates: MemoryDB>, changes_trie_build_cache_update: Option>>, changes_trie_config_update: Option>, pending_block: Option>, @@ -544,7 +549,7 @@ impl BlockImportOperation { } impl sc_client_api::backend::BlockImportOperation for BlockImportOperation { - type State = CachingState, Block>; + type State = SyncingCachingState, Block>; fn state(&self) -> ClientResult> { Ok(Some(&self.old_state)) @@ -574,7 +579,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc // Currently cache isn't implemented on full nodes. 
} - fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { + fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { self.db_updates = update; Ok(()) } @@ -621,7 +626,7 @@ impl sc_client_api::backend::BlockImportOperation for Bloc fn update_changes_trie( &mut self, - update: ChangesTrieTransaction, NumberFor>, + update: ChangesTrieTransaction, NumberFor>, ) -> ClientResult<()> { self.changes_trie_updates = update.0; self.changes_trie_build_cache_update = Some(update.1); @@ -666,9 +671,9 @@ struct StorageDb { pub state_db: StateDb>, } -impl sp_state_machine::Storage> for StorageDb { +impl sp_state_machine::Storage> for StorageDb { fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); + let key = prefixed_key::>(key, prefix); self.state_db.get(&key, self) .map_err(|e| format!("Database backend error: {:?}", e)) } @@ -688,13 +693,13 @@ struct DbGenesisStorage(pub Block::Hash); impl DbGenesisStorage { pub fn new() -> Self { let mut root = Block::Hash::default(); - let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); + let mut mdb = MemoryDB::>::default(); + sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); DbGenesisStorage(root) } } -impl sp_state_machine::Storage> for DbGenesisStorage { +impl sp_state_machine::Storage> for DbGenesisStorage { fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { Ok(None) } @@ -750,10 +755,10 @@ pub struct Backend { blockchain: BlockchainDb, canonicalization_delay: u64, shared_cache: SharedCache, - import_lock: RwLock<()>, + import_lock: Arc>, is_archive: bool, io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, - state_usage: StateUsageStats, + state_usage: Arc, } impl Backend { @@ -825,7 +830,7 @@ impl Backend { import_lock: Default::default(), is_archive: is_archive_pruning, io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), - state_usage: StateUsageStats::new(), + state_usage: Arc::new(StateUsageStats::new()), }) } @@ -1127,8 +1132,14 @@ impl Backend { self.state_usage.tally_writes(ops, bytes); let number_u64 = number.saturated_into::(); - let commit = self.storage.state_db.insert_block(&hash, number_u64, &pending_block.header.parent_hash(), changeset) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; + let commit = self.storage.state_db.insert_block( + &hash, + number_u64, + &pending_block.header.parent_hash(), + changeset, + ).map_err(|e: sc_state_db::Error| + sp_blockchain::Error::from(format!("State database error: {:?}", e)) + )?; apply_state_commit(&mut transaction, commit); // Check if need to finalize. Genesis is always finalized instantly. @@ -1156,7 +1167,8 @@ impl Backend { changes_trie_cache_ops, )?); self.state_usage.merge_sm(operation.old_state.usage_info()); - let cache = operation.old_state.release(); // release state reference so that it can be finalized + // release state reference so that it can be finalized + let cache = operation.old_state.into_cache_changes(); if finalized { // TODO: ensure best chain contains this block. 
@@ -1184,9 +1196,20 @@ impl Backend { displaced_leaf }; - let mut children = children::read_children(&*self.storage.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash)?; + let mut children = children::read_children( + &*self.storage.db, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + )?; children.push(hash); - children::write_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash, children); + children::write_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + children, + ); meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); @@ -1196,7 +1219,7 @@ impl Backend { }; let cache_update = if let Some(set_head) = operation.set_head { - if let Some(header) = ::sc_client::blockchain::HeaderBackend::header(&self.blockchain, set_head)? { + if let Some(header) = sc_client::blockchain::HeaderBackend::header(&self.blockchain, set_head)? { let number = header.number(); let hash = header.hash(); @@ -1266,7 +1289,6 @@ impl Backend { Ok(()) } - // write stuff to a transaction after a new block is finalized. // this canonicalizes finalized blocks. Fails if called with a block which // was not a child of the last finalized block. @@ -1354,11 +1376,13 @@ impl sc_client_api::backend::AuxStore for Backend where Block: Blo impl sc_client_api::backend::Backend for Backend { type BlockImportOperation = BlockImportOperation; type Blockchain = BlockchainDb; - type State = CachingState, Block>; + type State = SyncingCachingState, Block>; type OffchainStorage = offchain::LocalStorage; fn begin_operation(&self) -> ClientResult { - let old_state = self.state_at(BlockId::Hash(Default::default()))?; + let mut old_state = self.state_at(BlockId::Hash(Default::default()))?; + old_state.disable_syncing(); + Ok(BlockImportOperation { pending_block: None, old_state, @@ -1381,13 +1405,13 @@ impl sc_client_api::backend::Backend for Backend { block: BlockId, ) -> ClientResult<()> { operation.old_state = self.state_at(block)?; + operation.old_state.disable_syncing(); + operation.commit_state = true; Ok(()) } - fn commit_operation(&self, operation: Self::BlockImportOperation) - -> ClientResult<()> - { + fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> { let usage = operation.old_state.usage_info(); self.state_usage.merge_sm(usage); @@ -1447,7 +1471,6 @@ impl sc_client_api::backend::Backend for Backend { Some(self.offchain_storage.clone()) } - fn usage_info(&self) -> Option { let (io_stats, state_stats) = self.io_stats.take_or_else(|| ( @@ -1455,13 +1478,17 @@ impl sc_client_api::backend::Backend for Backend { self.state_usage.take(), ) ); - let database_cache = parity_util_mem::malloc_size(&*self.storage.db); - let state_cache = (*&self.shared_cache).lock().used_storage_cache_size(); + let database_cache = MemorySize::from_bytes(parity_util_mem::malloc_size(&*self.storage.db)); + let state_cache = MemorySize::from_bytes( + (*&self.shared_cache).lock().used_storage_cache_size(), + ); + let state_db = self.storage.state_db.memory_info(); Some(UsageInfo { memory: MemoryInfo { state_cache, database_cache, + state_db, }, io: IoInfo { transactions: io_stats.transactions, @@ -1568,7 +1595,17 @@ impl sc_client_api::backend::Backend for Backend { let root = genesis_storage.0.clone(); let db_state = DbState::::new(Arc::new(genesis_storage), root); let state = RefTrackingState::new(db_state, self.storage.clone(), None); - return Ok(CachingState::new(state, self.shared_cache.clone(), 
None)); + let caching_state = CachingState::new( + state, + self.shared_cache.clone(), + None, + ); + return Ok(SyncingCachingState::new( + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), + )); }, _ => {} } @@ -1591,7 +1628,17 @@ impl sc_client_api::backend::Backend for Backend { self.storage.clone(), Some(hash.clone()), ); - Ok(CachingState::new(state, self.shared_cache.clone(), Some(hash))) + let caching_state = CachingState::new( + state, + self.shared_cache.clone(), + Some(hash), + ); + Ok(SyncingCachingState::new( + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), + )) } else { Err( sp_blockchain::Error::UnknownBlock( @@ -1626,17 +1673,8 @@ impl sc_client_api::backend::Backend for Backend { } } - fn destroy_state(&self, state: Self::State) -> ClientResult<()> { - self.state_usage.merge_sm(state.usage_info()); - if let Some(hash) = state.cache.parent_hash.clone() { - let is_best = self.blockchain.meta.read().best_hash == hash; - state.release().sync_cache(&[], &[], vec![], vec![], None, None, is_best); - } - Ok(()) - } - fn get_import_lock(&self) -> &RwLock<()> { - &self.import_lock + &*self.import_lock } } @@ -1647,7 +1685,7 @@ pub(crate) mod tests { use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use crate::columns; - use sp_core::{Blake2Hasher, H256}; + use sp_core::H256; use sc_client_api::backend::{Backend as BTrait, BlockImportOperation as Op}; use sc_client::blockchain::Backend as BLBTrait; use sp_runtime::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; @@ -1658,11 +1696,11 @@ pub(crate) mod tests { pub(crate) type Block = RawBlock>; - pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { + pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { let mut changes_root = H256::default(); - let mut changes_trie_update = MemoryDB::::default(); + let mut changes_trie_update = MemoryDB::::default(); { - let mut trie = TrieDBMut::::new( + let mut trie = TrieDBMut::::new( &mut changes_trie_update, &mut changes_root ); @@ -1835,6 +1873,7 @@ pub(crate) mod tests { op.update_db_storage(overlay).unwrap(); header.state_root = root.into(); + op.update_storage(storage, Vec::new()).unwrap(); op.set_block_data( header, Some(vec![]), @@ -1894,7 +1933,7 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); assert_eq!(backend.storage.db.get( columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) ).unwrap().unwrap(), &b"hello"[..]); hash }; @@ -1931,7 +1970,7 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); assert_eq!(backend.storage.db.get( columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) ).unwrap().unwrap(), &b"hello"[..]); hash }; @@ -1969,7 +2008,7 @@ pub(crate) mod tests { assert!(backend.storage.db.get( columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) ).unwrap().is_some()); hash }; @@ -2003,7 +2042,7 @@ pub(crate) mod tests { backend.commit_operation(op).unwrap(); assert!(backend.storage.db.get( columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) ).unwrap().is_none()); } @@ -2012,7 +2051,7 @@ pub(crate) mod tests { backend.finalize_block(BlockId::Number(3), None).unwrap(); assert!(backend.storage.db.get( columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + 
&sp_trie::prefixed_key::(&key, EMPTY_PREFIX) ).unwrap().is_none()); } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index 14ce6ac0f9a05d5511698b169a117d8fbd888d9a..cda1a1195268e74893b6a18440f23d7d1495d971 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -36,7 +36,7 @@ use sp_blockchain::{ use sc_client::light::blockchain::Storage as LightBlockchainStorage; use codec::{Decode, Encode}; use sp_runtime::generic::{DigestItem, BlockId}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HasherFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HashFor}; use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType}; use crate::utils::{self, meta_keys, DatabaseType, Meta, db_err, read_db, block_id_to_lookup_key, read_meta}; use crate::{DatabaseSettings, FrozenForDuration}; @@ -305,7 +305,7 @@ impl LightStorage { Some(old_current_num) }); - let new_header_cht_root = cht::compute_root::, _>( + let new_header_cht_root = cht::compute_root::, _>( cht::size(), new_cht_number, cht_range.map(|num| self.hash(num)) )?; transaction.put( @@ -317,12 +317,12 @@ impl LightStorage { // if the header includes changes trie root, let's build a changes tries roots CHT if header.digest().log(DigestItem::as_changes_trie_root).is_some() { let mut current_num = new_cht_start; - let cht_range = ::std::iter::from_fn(|| { + let cht_range = std::iter::from_fn(|| { let old_current_num = current_num; current_num = current_num + One::one(); Some(old_current_num) }); - let new_changes_trie_cht_root = cht::compute_root::, _>( + let new_changes_trie_cht_root = cht::compute_root::, _>( cht::size(), new_cht_number, cht_range .map(|num| self.changes_trie_root(BlockId::Number(num))) )?; @@ -572,15 +572,16 @@ impl LightBlockchainStorage for LightStorage #[cfg(not(target_os = "unknown"))] fn usage_info(&self) -> Option { - use sc_client_api::{MemoryInfo, IoInfo}; + use sc_client_api::{MemoryInfo, IoInfo, MemorySize}; - let database_cache = parity_util_mem::malloc_size(&*self.db); + let database_cache = MemorySize::from_bytes(parity_util_mem::malloc_size(&*self.db)); let io_stats = self.io_stats.take_or_else(|| self.db.io_stats(kvdb::IoStatsKind::SincePrevious)); Some(UsageInfo { memory: MemoryInfo { database_cache, - state_cache: 0, + state_cache: Default::default(), + state_db: Default::default(), }, io: IoInfo { transactions: io_stats.transactions, diff --git a/client/db/src/stats.rs b/client/db/src/stats.rs index 805a0f498f7fcb2fe749f09db355cb0b1313d1c1..1d6ed8e7f0493c9adf7d516bfacd0467c231dae7 100644 --- a/client/db/src/stats.rs +++ b/client/db/src/stats.rs @@ -59,8 +59,14 @@ impl StateUsageStats { } /// Tally one child key read. - pub fn tally_child_key_read(&self, key: &(Vec, Vec), val: Option>, cache: bool) -> Option> { - self.tally_read(key.0.len() as u64 + key.1.len() as u64 + val.as_ref().map(|x| x.len() as u64).unwrap_or(0), cache); + pub fn tally_child_key_read( + &self, + key: &(Vec, Vec), + val: Option>, + cache: bool, + ) -> Option> { + let bytes = key.0.len() + key.1.len() + val.as_ref().map(|x| x.len()).unwrap_or(0); + self.tally_read(bytes as u64, cache); val } @@ -80,11 +86,15 @@ impl StateUsageStats { self.bytes_read_cache.fetch_add(info.cache_reads.bytes, AtomicOrdering::Relaxed); } + /// Returns the collected `UsageInfo` and resets the internal state. 
pub fn take(&self) -> sp_state_machine::UsageInfo { use sp_state_machine::UsageUnit; fn unit(ops: &AtomicU64, bytes: &AtomicU64) -> UsageUnit { - UsageUnit { ops: ops.swap(0, AtomicOrdering::Relaxed), bytes: bytes.swap(0, AtomicOrdering::Relaxed) } + UsageUnit { + ops: ops.swap(0, AtomicOrdering::Relaxed), + bytes: bytes.swap(0, AtomicOrdering::Relaxed), + } } sp_state_machine::UsageInfo { diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 6ef29f47b8c42c1b5c5702cc9724da611d87d2f2..2ac1ee3dbd5f4af37f6fd21d72a55ee7a5748d1d 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -18,10 +18,11 @@ use std::collections::{VecDeque, HashSet, HashMap}; use std::sync::Arc; +use std::hash::Hash as StdHash; use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; use linked_hash_map::{LinkedHashMap, Entry}; use hash_db::Hasher; -use sp_runtime::traits::{Block as BlockT, Header, HasherFor, NumberFor}; +use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; use sp_core::hexdisplay::HexDisplay; use sp_core::storage::ChildInfo; use sp_state_machine::{ @@ -29,8 +30,7 @@ use sp_state_machine::{ StorageCollection, ChildStorageCollection, }; use log::trace; -use std::hash::Hash as StdHash; -use crate::stats::StateUsageStats; +use crate::{utils::Meta, stats::StateUsageStats}; const STATE_CACHE_BLOCKS: usize = 12; @@ -281,7 +281,7 @@ pub struct CacheChanges { /// Shared canonical state cache. shared_cache: SharedCache, /// Local cache of values for this state. - local_cache: RwLock>>, + local_cache: RwLock>>, /// Hash of the block on top of which this instance was created or /// `None` if cache is disabled pub parent_hash: Option, @@ -296,16 +296,16 @@ pub struct CacheChanges { /// For canonical instances local cache is accumulated and applied /// in `sync_cache` along with the change overlay. /// For non-canonical clones local cache and changes are dropped. -pub struct CachingState>, B: BlockT> { +pub struct CachingState { /// Usage statistics usage: StateUsageStats, /// Backing state. state: S, /// Cache data. - pub cache: CacheChanges, + cache: CacheChanges, } -impl>, B: BlockT> std::fmt::Debug for CachingState { +impl std::fmt::Debug for CachingState { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "Block {:?}", self.cache.parent_hash) } @@ -417,12 +417,15 @@ impl CacheChanges { } } } - } -impl>, B: BlockT> CachingState { +impl>, B: BlockT> CachingState { /// Create a new instance wrapping generic State and shared cache. 
- pub fn new(state: S, shared_cache: SharedCache, parent_hash: Option) -> Self { + pub(crate) fn new( + state: S, + shared_cache: SharedCache, + parent_hash: Option, + ) -> Self { CachingState { usage: StateUsageStats::new(), state, @@ -433,7 +436,7 @@ impl>, B: BlockT> CachingState { hashes: Default::default(), child_storage: Default::default(), }), - parent_hash: parent_hash, + parent_hash, }, } } @@ -445,8 +448,7 @@ impl>, B: BlockT> CachingState { child_key: Option<&ChildStorageKey>, parent_hash: &Option, modifications: &VecDeque> - ) -> bool - { + ) -> bool { let mut parent = match *parent_hash { None => { trace!("Cache lookup skipped for {:?}: no parent hash", key.as_ref().map(HexDisplay::from)); @@ -479,17 +481,15 @@ impl>, B: BlockT> CachingState { } } } - trace!("Cache lookup skipped for {:?}: parent hash is unknown", key.as_ref().map(HexDisplay::from)); + trace!( + "Cache lookup skipped for {:?}: parent hash is unknown", + key.as_ref().map(HexDisplay::from), + ); false } - - /// Dispose state and return cache data. - pub fn release(self) -> CacheChanges { - self.cache - } } -impl>, B: BlockT> StateBackend> for CachingState { +impl>, B: BlockT> StateBackend> for CachingState { type Error = S::Error; type Transaction = S::Transaction; type TrieBackendStorage = S::TrieBackendStorage; @@ -659,7 +659,7 @@ impl>, B: BlockT> StateBackend> for Ca self.state.child_keys(storage_key, child_info, prefix) } - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { + fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { self.state.as_trie_backend() } @@ -668,12 +668,221 @@ impl>, B: BlockT> StateBackend> for Ca } } +/// Extended [`CachingState`] that will sync the caches on drop. +pub struct SyncingCachingState { + /// The usage statistics of the backend. These will be updated on drop. + state_usage: Arc, + /// Reference to the meta db. + meta: Arc, Block::Hash>>>, + /// Mutex to lock get exlusive access to the backend. + lock: Arc>, + /// The wrapped caching state. + /// + /// This is required to be a `Option`, because sometimes we want to extract + /// the cache changes and Rust does not allow to move fields from types that + /// implement `Drop`. + caching_state: Option>, + /// Disable syncing of the cache. This is by default always `false`. However, + /// we need to disable syncing when this is a state in a + /// [`BlockImportOperation`](crate::BlockImportOperation). The import operation + /// takes care to sync the cache and more importantly we want to prevent a dead + /// lock. + disable_syncing: bool, +} + +impl SyncingCachingState { + /// Create new automatic syncing state. + pub fn new( + caching_state: CachingState, + state_usage: Arc, + meta: Arc, B::Hash>>>, + lock: Arc>, + ) -> Self { + Self { + caching_state: Some(caching_state), + state_usage, + meta, + lock, + disable_syncing: false, + } + } + + /// Returns the reference to the internal [`CachingState`]. + fn caching_state(&self) -> &CachingState { + self.caching_state + .as_ref() + .expect("`caching_state` is always valid for the lifetime of the object; qed") + } + + /// Convert `Self` into the cache changes. + pub fn into_cache_changes(mut self) -> CacheChanges { + self.caching_state + .take() + .expect("`caching_state` is always valid for the lifetime of the object; qed") + .cache + } + + /// Disable syncing the cache on drop. 
+ pub fn disable_syncing(&mut self) { + self.disable_syncing = true; + } +} + +impl std::fmt::Debug for SyncingCachingState { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.caching_state().fmt(f) + } +} + +impl>, B: BlockT> StateBackend> for SyncingCachingState { + type Error = S::Error; + type Transaction = S::Transaction; + type TrieBackendStorage = S::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.caching_state().storage(key) + } + + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.caching_state().storage_hash(key) + } + + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.caching_state().child_storage(storage_key, child_info, key) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + self.caching_state().exists_storage(key) + } + + fn exists_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result { + self.caching_state().exists_child_storage(storage_key, child_info, key) + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.caching_state().for_keys_in_child_storage(storage_key, child_info, f) + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.caching_state().next_storage_key(key) + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.caching_state().next_child_storage_key(storage_key, child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.caching_state().for_keys_with_prefix(prefix, f) + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.caching_state().for_key_values_with_prefix(prefix, f) + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.caching_state().for_child_keys_with_prefix(storage_key, child_info, prefix, f) + } + + fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.caching_state().storage_root(delta) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (B::Hash, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.caching_state().child_storage_root(storage_key, child_info, delta) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.caching_state().pairs() + } + + fn keys(&self, prefix: &[u8]) -> Vec> { + self.caching_state().keys(prefix) + } + + fn child_keys( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) -> Vec> { + self.caching_state().child_keys(storage_key, child_info, prefix) + } + + fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { + self.caching_state + .as_mut() + .expect("`caching_state` is valid for the lifetime of the object; qed") + .as_trie_backend() + } + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + self.caching_state().usage_info() + } +} + +impl Drop for SyncingCachingState { + fn drop(&mut self) { + if self.disable_syncing { + return; + } + + if let Some(mut caching_state) = self.caching_state.take() { + let _lock = self.lock.read(); + + self.state_usage.merge_sm(caching_state.usage.take()); + if let Some(hash) = caching_state.cache.parent_hash.clone() { + let is_best = self.meta.read().best_hash == hash; + caching_state.cache.sync_cache(&[], &[], vec![], 
vec![], None, None, is_best); + } + } + } +} + #[cfg(test)] mod tests { use super::*; - use sp_runtime::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; + use sp_runtime::{ + traits::BlakeTwo256, + testing::{H256, Block as RawBlock, ExtrinsicWrapper}, + }; use sp_state_machine::InMemoryBackend; - use sp_core::Blake2Hasher; type Block = RawBlock>; @@ -695,7 +904,7 @@ mod tests { // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] // state [ 5 5 4 3 2 2 ] let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(root_parent), ); @@ -710,14 +919,14 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h0), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h0), ); @@ -732,7 +941,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1b), ); @@ -747,7 +956,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1a), ); @@ -762,35 +971,35 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h2a), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h3a), ); assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1a), ); assert!(s.storage(&key).unwrap().is_none()); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h2b), ); assert!(s.storage(&key).unwrap().is_none()); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1b), ); @@ -799,7 +1008,7 @@ mod tests { // reorg to 3b // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h2b), ); @@ -813,7 +1022,7 @@ mod tests { true, ); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h3a), ); @@ -834,7 +1043,7 @@ mod tests { let shared = new_shared_cache::(256*1024, (0,1)); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(root_parent), ); @@ -849,14 +1058,14 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1), ); @@ -871,7 +1080,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h2b), ); @@ -886,7 +1095,7 @@ mod tests { ); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h2a), ); @@ -906,21 +1115,21 @@ mod tests { let shared = new_shared_cache::(256*1024, (0,1)); let mut s = CachingState::new( - InMemoryBackend::::default(), + 
InMemoryBackend::::default(), shared.clone(), Some(root_parent), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h2a), ); @@ -935,14 +1144,14 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1), ); s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h2b), ); @@ -957,7 +1166,7 @@ mod tests { ); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h3a), ); @@ -971,7 +1180,7 @@ mod tests { let h0 = H256::random(); let mut s = CachingState::new( - InMemoryBackend::::default(), shared.clone(), Some(root_parent.clone()), + InMemoryBackend::::default(), shared.clone(), Some(root_parent.clone()), ); let key = H256::random()[..].to_vec(); @@ -1009,7 +1218,7 @@ mod tests { let h0 = H256::random(); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(root_parent), ); @@ -1053,7 +1262,7 @@ mod tests { let shared = new_shared_cache::(256 * 1024, (0, 1)); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(root_parent.clone()), ); @@ -1068,7 +1277,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h0), ); @@ -1083,7 +1292,7 @@ mod tests { ); let mut s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1), ); @@ -1106,7 +1315,7 @@ mod tests { s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true); let s = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), shared.clone(), Some(h1), ); @@ -1121,9 +1330,11 @@ mod qc { use quickcheck::{quickcheck, TestResult, Arbitrary}; use super::*; - use sp_runtime::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; + use sp_runtime::{ + traits::BlakeTwo256, + testing::{H256, Block as RawBlock, ExtrinsicWrapper}, + }; use sp_state_machine::InMemoryBackend; - use sp_core::Blake2Hasher; type Block = RawBlock>; @@ -1250,22 +1461,22 @@ mod qc { } } - fn head_state(&self, hash: H256) -> CachingState, Block> { + fn head_state(&self, hash: H256) -> CachingState, Block> { CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), self.shared.clone(), - Some(hash) + Some(hash), ) } - fn canon_head_state(&self) -> CachingState, Block> { + fn canon_head_state(&self) -> CachingState, Block> { self.head_state(self.canon.last().expect("Expected to be one commit").hash) } fn mutate_static( &mut self, action: Action, - ) -> CachingState, Block> { + ) -> CachingState, Block> { self.mutate(action).expect("Expected to provide only valid actions to the mutate_static") } @@ -1284,7 +1495,7 @@ mod qc { fn mutate( &mut self, action: Action, - ) -> Result, Block>, ()> { + ) -> Result, Block>, ()> { let state = match action { Action::Fork { depth, hash, changes } => { let pos = self.canon.len() as isize - depth as isize; @@ -1321,9 +1532,9 @@ mod qc { }; let 
mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), self.shared.clone(), - Some(parent) + Some(parent), ); state.cache.sync_cache( @@ -1360,9 +1571,9 @@ mod qc { } let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), self.shared.clone(), - Some(parent_hash) + Some(parent_hash), ); state.cache.sync_cache( @@ -1407,9 +1618,9 @@ mod qc { self.canon.push(node); let mut state = CachingState::new( - InMemoryBackend::::default(), + InMemoryBackend::::default(), self.shared.clone(), - Some(fork_at) + Some(fork_at), ); let height = pos as u64 + enacted.len() as u64 + 2; diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 534cbb2197ebce2c55006b10bea4529227418969..f26714eb5a7dbf9edae0fcac6a850e877478fd0e 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -36,6 +36,7 @@ use crate::{DatabaseSettings, DatabaseSettingsSrc}; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. +#[cfg(any(feature = "kvdb-rocksdb", feature = "test-helpers", test))] pub const NUM_COLUMNS: u32 = 11; /// Meta column. The set of keys in the column is shared by full && light storages. pub const COLUMN_META: u32 = 0; diff --git a/client/executor/Cargo.toml b/client/executor/Cargo.toml index c8774470febf3ea3ed82e380b8f0775dd7cbd55c..1353bc57307b9c5d664b005f7d1bcbb997ca093f 100644 --- a/client/executor/Cargo.toml +++ b/client/executor/Cargo.toml @@ -1,28 +1,32 @@ [package] name = "sc-executor" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "A crate that provides means of executing/dispatching calls into the runtime." 
+documentation = "https://docs.rs/sc-executor" [dependencies] derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.0.0" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-trie = { version = "2.0.0", path = "../../primitives/trie" } -sp-serializer = { version = "2.0.0", path = "../../primitives/serializer" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-panic-handler = { version = "2.0.0", path = "../../primitives/panic-handler" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-trie = { version = "2.0.0-alpha.2", path = "../../primitives/trie" } +sp-serializer = { version = "2.0.0-alpha.2", path = "../../primitives/serializer" } +sp-version = { version = "2.0.0-alpha.2", path = "../../primitives/version" } +sp-panic-handler = { version = "2.0.0-alpha.2", path = "../../primitives/panic-handler" } wasmi = "0.6.2" parity-wasm = "0.41.0" lazy_static = "1.4.0" -sp-wasm-interface = { version = "2.0.0", path = "../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../primitives/runtime-interface" } -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } -sc-executor-common = { version = "0.8", path = "common" } -sc-executor-wasmi = { version = "0.8", path = "wasmi" } -sc-executor-wasmtime = { version = "0.8", path = "wasmtime", optional = true } +sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../primitives/runtime-interface" } +sp-externalities = { version = "0.8.0-alpha.2", path = "../../primitives/externalities" } +sc-executor-common = { version = "0.8.0-alpha.2", path = "common" } +sc-executor-wasmi = { version = "0.8.0-alpha.2", path = "wasmi" } +sc-executor-wasmtime = { version = "0.8.0-alpha.2", path = "wasmtime", optional = true } parking_lot = "0.10.0" log = "0.4.8" libsecp256k1 = "0.3.4" @@ -31,10 +35,11 @@ libsecp256k1 = "0.3.4" assert_matches = "1.3.0" wabt = "0.9.2" hex-literal = "0.2.1" -sc-runtime-test = { version = "2.0.0", path = "runtime-test" } -substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } +sc-runtime-test = { version = "2.0.0-dev", path = "runtime-test" } +substrate-test-runtime = { version = "2.0.0-dev", path = "../../test-utils/runtime" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } test-case = "0.3.3" +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } [features] default = [ "std" ] diff --git a/client/executor/common/Cargo.toml b/client/executor/common/Cargo.toml index e10afe3448b7e960571c7cb1135f97d72a7cf066..04db56938a4bc1105b140b8f1d74207a2c5d3395 100644 --- a/client/executor/common/Cargo.toml +++ b/client/executor/common/Cargo.toml @@ -1,20 +1,24 @@ [package] name = "sc-executor-common" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "A set of common definitions that are needed for defining execution engines." 
+documentation = "https://docs.rs/sc-executor-common/" [dependencies] log = "0.4.8" derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.0.0" } +codec = { package = "parity-scale-codec", version = "1.2.0" } wasmi = "0.6.2" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } -sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } -sp-serializer = { version = "2.0.0", path = "../../../primitives/serializer" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-alpha.2", path = "../../../primitives/allocator" } +sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime-interface" } +sp-serializer = { version = "2.0.0-alpha.2", path = "../../../primitives/serializer" } [features] default = [] diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index 7af6c2bd53c0ec1f9de45356d459a100e2cf50e8..b59ca8ba930edb71cdc7d671bbb3e99df9d35853 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -17,18 +17,25 @@ //! Definitions for a wasm runtime. use crate::error::Error; -use sp_wasm_interface::{Function, Value}; +use sp_wasm_interface::Value; -/// A trait that defines an abstract wasm runtime. +/// A trait that defines an abstract WASM runtime module. /// /// This can be implemented by an execution engine. -pub trait WasmRuntime { - /// Return the host functions that are registered for this Wasm runtime. - fn host_functions(&self) -> &[&'static dyn Function]; +pub trait WasmModule: Sync + Send { + /// Create a new instance. + fn new_instance(&self) -> Result, Error>; +} - /// Call a method in the Substrate runtime by name. Returns the encoded result on success. - fn call(&mut self, method: &str, data: &[u8]) -> Result, Error>; +/// A trait that defines an abstract wasm module instance. +/// +/// This can be implemented by an execution engine. +pub trait WasmInstance: Send { + /// Call a method on this WASM instance and reset it afterwards. + /// Returns the encoded result on success. + fn call(&self, method: &str, data: &[u8]) -> Result, Error>; /// Get the value from a global with the given `name`. - fn get_global_val(&self, name: &str) -> Result, Error>; + /// This method is only suitable for getting immutable globals. 
+ fn get_global_const(&self, name: &str) -> Result, Error>; } diff --git a/client/executor/runtime-test/Cargo.toml b/client/executor/runtime-test/Cargo.toml index 3cb0c03a87c8eea3832ef3993fc6711d58deee86..ad7c44718d243433ae6e2e775d29b213d091ca3a 100644 --- a/client/executor/runtime-test/Cargo.toml +++ b/client/executor/runtime-test/Cargo.toml @@ -1,21 +1,24 @@ [package] name = "sc-runtime-test" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "GPL-3.0" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../../primitives/io" } -sp-sandbox = { version = "0.8.0", default-features = false, path = "../../../primitives/sandbox" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-allocator = { version = "2.0.0", default-features = false, path = "../../../primitives/allocator" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/io" } +sp-sandbox = { version = "0.8.0-alpha.2", default-features = false, path = "../../../primitives/sandbox" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } +sp-allocator = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/allocator" } [build-dependencies] -wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } [features] default = [ "std" ] diff --git a/client/executor/src/deprecated_host_interface.rs b/client/executor/src/deprecated_host_interface.rs deleted file mode 100644 index 6ea0b11f5a909ad5c424aa4f7b07b09f53a1cf07..0000000000000000000000000000000000000000 --- a/client/executor/src/deprecated_host_interface.rs +++ /dev/null @@ -1,941 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Definition and implementation of the old and deprecated Substrate runtime interface for the host. 
- -use codec::Encode; -use std::{convert::TryFrom, str}; -use sp_core::{ - blake2_128, blake2_256, twox_64, twox_128, twox_256, ed25519, sr25519, keccak_256, Blake2Hasher, Pair, - crypto::KeyTypeId, offchain, -}; -use sp_trie::{TrieConfiguration, trie_types::Layout}; -use sp_wasm_interface::{ - Pointer, WordSize, WritePrimitive, ReadPrimitive, FunctionContext, Result as WResult, -}; - -#[cfg(feature="wasm-extern-trace")] -macro_rules! debug_trace { - ( $( $x:tt )* ) => ( trace!( $( $x )* ) ) -} - -#[cfg(not(feature="wasm-extern-trace"))] -macro_rules! debug_trace { - ( $( $x:tt )* ) => () -} - -/// The old and deprecated Substrate externals. These are still required for backwards compatibility -/// reasons. -pub struct SubstrateExternals; - -enum RecoverResult { - Invalid(u32), - Valid(secp256k1::PublicKey), -} - -fn secp256k1_recover( - context: &mut dyn FunctionContext, - msg_data: Pointer, - sig_data: Pointer, -) -> WResult { - let mut sig = [0u8; 65]; - context.read_memory_into(sig_data, &mut sig[..]) - .map_err(|_| "Invalid attempt to get signature in ext_secp256k1_ecdsa_recover")?; - let rs = match secp256k1::Signature::parse_slice(&sig[0..64]) { - Ok(rs) => rs, - _ => return Ok(RecoverResult::Invalid(1)), - }; - - let recovery_id = if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8; - let v = match secp256k1::RecoveryId::parse(recovery_id) { - Ok(v) => v, - _ => return Ok(RecoverResult::Invalid(2)), - }; - - let mut msg = [0u8; 32]; - context.read_memory_into(msg_data, &mut msg[..]) - .map_err(|_| "Invalid attempt to get message in ext_secp256k1_ecdsa_recover")?; - - Ok(match secp256k1::recover(&secp256k1::Message::parse(&msg), &rs, &v) { - Ok(pubkey) => RecoverResult::Valid(pubkey), - Err(_) => RecoverResult::Invalid(3), - }) -} - -impl_wasm_host_interface! { - impl SubstrateExternals where context { - ext_malloc(size: WordSize) -> Pointer { - let r = context.allocate_memory(size)?; - debug_trace!(target: "sp-io", "malloc {} bytes at {:?}", size, r); - Ok(r) - } - - ext_free(addr: Pointer) { - context.deallocate_memory(addr)?; - debug_trace!(target: "sp-io", "free {:?}", addr); - Ok(()) - } - - ext_sandbox_instantiate( - dispatch_thunk_idx: u32, - wasm_ptr: Pointer, - wasm_len: WordSize, - imports_ptr: Pointer, - imports_len: WordSize, - state: u32, - ) -> u32 { - let wasm = context.read_memory(wasm_ptr, wasm_len) - .map_err(|_| "OOB while ext_sandbox_instantiate: wasm")?; - let raw_env_def = context.read_memory(imports_ptr, imports_len) - .map_err(|_| "OOB while ext_sandbox_instantiate: imports")?; - - context.sandbox().instance_new(dispatch_thunk_idx, &wasm, &raw_env_def, state) - } - - ext_sandbox_instance_teardown(instance_idx: u32) { - context.sandbox().instance_teardown(instance_idx) - } - - ext_sandbox_invoke( - instance_idx: u32, - export_ptr: Pointer, - export_len: WordSize, - args_ptr: Pointer, - args_len: WordSize, - return_val_ptr: Pointer, - return_val_len: WordSize, - state: u32, - ) -> u32 { - let export = context.read_memory(export_ptr, export_len) - .map_err(|_| "OOB while ext_sandbox_invoke: export") - .and_then(|b| - String::from_utf8(b) - .map_err(|_| "Export name should be a valid utf-8 sequence") - )?; - - // Deserialize arguments and convert them into wasmi types. 
- let serialized_args = context.read_memory(args_ptr, args_len) - .map_err(|_| "OOB while ext_sandbox_invoke: args")?; - - context.sandbox().invoke( - instance_idx, - &export, - &serialized_args, - return_val_ptr, - return_val_len, - state, - ) - } - - ext_sandbox_memory_new(initial: WordSize, maximum: WordSize) -> u32 { - context.sandbox().memory_new(initial, maximum) - } - - ext_sandbox_memory_get( - memory_idx: u32, - offset: WordSize, - buf_ptr: Pointer, - buf_len: WordSize, - ) -> u32 { - context.sandbox().memory_get(memory_idx, offset, buf_ptr, buf_len) - } - - ext_sandbox_memory_set( - memory_idx: u32, - offset: WordSize, - val_ptr: Pointer, - val_len: WordSize, - ) -> u32 { - context.sandbox().memory_set(memory_idx, offset, val_ptr, val_len) - } - - ext_sandbox_memory_teardown(memory_idx: u32) { - context.sandbox().memory_teardown(memory_idx) - } - - ext_print_utf8(utf8_data: Pointer, utf8_len: WordSize) { - if let Ok(utf8) = context.read_memory(utf8_data, utf8_len) { - sp_io::misc::print_utf8(&utf8); - } - Ok(()) - } - - ext_print_hex(data: Pointer, len: WordSize) { - if let Ok(hex) = context.read_memory(data, len) { - sp_io::misc::print_hex(&hex); - } - Ok(()) - } - - ext_print_num(number: u64) { - sp_io::misc::print_num(number); - Ok(()) - } - - ext_log( - level: u32, - target_data: Pointer, - target_len: WordSize, - message_data: Pointer, - message_len: WordSize, - ) { - let target = context.read_memory(target_data, target_len) - .map_err(|_| "Invalid attempt to determine target in ext_log")?; - let message = context.read_memory(message_data, message_len) - .map_err(|_| "Invalid attempt to determine message in ext_log")?; - - let target_str = std::str::from_utf8(&target) - .map_err(|_| "Target invalid utf8 in ext_log")?; - - sp_io::logging::log(level.into(), &target_str, &message); - Ok(()) - } - - ext_set_storage( - key_data: Pointer, - key_len: WordSize, - value_data: Pointer, - value_len: WordSize, - ) { - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to determine key in ext_set_storage")?; - let value = context.read_memory(value_data, value_len) - .map_err(|_| "Invalid attempt to determine value in ext_set_storage")?; - Ok(sp_io::storage::set(&key, &value)) - } - - ext_clear_storage(key_data: Pointer, key_len: WordSize) { - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to determine key in ext_clear_storage")?; - Ok(sp_io::storage::clear(&key)) - } - - ext_exists_storage(key_data: Pointer, key_len: WordSize) -> u32 { - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to determine key in ext_exists_storage")?; - Ok(if sp_io::storage::exists(&key) { 1 } else { 0 }) - } - - ext_clear_prefix(prefix_data: Pointer, prefix_len: WordSize) { - let prefix = context.read_memory(prefix_data, prefix_len) - .map_err(|_| "Invalid attempt to determine prefix in ext_clear_prefix")?; - Ok(sp_io::storage::clear_prefix(&prefix)) - } - - ext_get_allocated_storage( - key_data: Pointer, - key_len: WordSize, - written_out: Pointer, - ) -> Pointer { - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to determine key in ext_get_allocated_storage")?; - - if let Some(value) = sp_io::storage::get(&key) { - let offset = context.allocate_memory(value.len() as u32)?; - context.write_memory(offset, &value) - .map_err(|_| "Invalid attempt to set memory in ext_get_allocated_storage")?; - context.write_primitive(written_out, value.len() as u32) - .map_err(|_| "Invalid attempt to 
write written_out in ext_get_allocated_storage")?; - Ok(offset) - } else { - context.write_primitive(written_out, u32::max_value()) - .map_err(|_| "Invalid attempt to write failed written_out in ext_get_allocated_storage")?; - Ok(Pointer::null()) - } - } - - ext_get_storage_into( - key_data: Pointer, - key_len: WordSize, - value_data: Pointer, - value_len: WordSize, - value_offset: WordSize, - ) -> WordSize { - let key = context.read_memory(key_data, key_len) - .map_err(|_| "Invalid attempt to get key in ext_get_storage_into")?; - - if let Some(value) = sp_io::storage::get(&key) { - let data = &value[value.len().min(value_offset as usize)..]; - let written = std::cmp::min(value_len as usize, data.len()); - context.write_memory(value_data, &data[..written]) - .map_err(|_| "Invalid attempt to set value in ext_get_storage_into")?; - Ok(value.len() as u32) - } else { - Ok(u32::max_value()) - } - } - - ext_storage_root(result: Pointer) { - context.write_memory(result, sp_io::storage::root().as_ref()) - .map_err(|_| "Invalid attempt to set memory in ext_storage_root".into()) - } - - ext_storage_changes_root( - parent_hash_data: Pointer, - _len: WordSize, - result: Pointer, - ) -> u32 { - let mut parent_hash = [0u8; 32]; - context.read_memory_into(parent_hash_data, &mut parent_hash[..]) - .map_err(|_| "Invalid attempt to get parent_hash in ext_storage_changes_root")?; - - if let Some(r) = sp_io::storage::changes_root(&parent_hash) { - context.write_memory(result, &r[..]) - .map_err(|_| "Invalid attempt to set memory in ext_storage_changes_root")?; - Ok(1) - } else { - Ok(0) - } - } - - ext_blake2_256_enumerated_trie_root( - values_data: Pointer, - lens_data: Pointer, - lens_len: WordSize, - result: Pointer, - ) { - let values = (0..lens_len) - .map(|i| context.read_primitive(lens_data.offset(i).ok_or("Pointer overflow")?)) - .collect::, _>>()? 
- .into_iter() - .scan(0u32, |acc, v| { let o = *acc; *acc += v; Some((o, v)) }) - .map(|(offset, len)| - context.read_memory(values_data.offset(offset).ok_or("Pointer overflow")?, len) - .map_err(|_| - "Invalid attempt to get memory in ext_blake2_256_enumerated_trie_root" - ) - ) - .collect::, _>>()?; - let r = Layout::::ordered_trie_root(values.into_iter()); - context.write_memory(result, &r[..]) - .map_err(|_| "Invalid attempt to set memory in ext_blake2_256_enumerated_trie_root")?; - Ok(()) - } - - ext_chain_id() -> u64 { - Ok(sp_io::misc::chain_id()) - } - - ext_twox_64(data: Pointer, len: WordSize, out: Pointer) { - let result: [u8; 8] = if len == 0 { - let hashed = twox_64(&[0u8; 0]); - hashed - } else { - let key = context.read_memory(data, len) - .map_err(|_| "Invalid attempt to get key in ext_twox_64")?; - let hashed_key = twox_64(&key); - hashed_key - }; - - context.write_memory(out, &result) - .map_err(|_| "Invalid attempt to set result in ext_twox_64")?; - Ok(()) - } - - ext_twox_128(data: Pointer, len: WordSize, out: Pointer) { - let result: [u8; 16] = if len == 0 { - let hashed = twox_128(&[0u8; 0]); - hashed - } else { - let key = context.read_memory(data, len) - .map_err(|_| "Invalid attempt to get key in ext_twox_128")?; - let hashed_key = twox_128(&key); - hashed_key - }; - - context.write_memory(out, &result) - .map_err(|_| "Invalid attempt to set result in ext_twox_128")?; - Ok(()) - } - - ext_twox_256(data: Pointer, len: WordSize, out: Pointer) { - let result: [u8; 32] = if len == 0 { - twox_256(&[0u8; 0]) - } else { - let mem = context.read_memory(data, len) - .map_err(|_| "Invalid attempt to get data in ext_twox_256")?; - twox_256(&mem) - }; - context.write_memory(out, &result) - .map_err(|_| "Invalid attempt to set result in ext_twox_256")?; - Ok(()) - } - - ext_blake2_128(data: Pointer, len: WordSize, out: Pointer) { - let result: [u8; 16] = if len == 0 { - let hashed = blake2_128(&[0u8; 0]); - hashed - } else { - let key = context.read_memory(data, len) - .map_err(|_| "Invalid attempt to get key in ext_blake2_128")?; - let hashed_key = blake2_128(&key); - hashed_key - }; - - context.write_memory(out, &result) - .map_err(|_| "Invalid attempt to set result in ext_blake2_128")?; - Ok(()) - } - - ext_blake2_256(data: Pointer, len: WordSize, out: Pointer) { - let result: [u8; 32] = if len == 0 { - blake2_256(&[0u8; 0]) - } else { - let mem = context.read_memory(data, len) - .map_err(|_| "Invalid attempt to get data in ext_blake2_256")?; - blake2_256(&mem) - }; - context.write_memory(out, &result) - .map_err(|_| "Invalid attempt to set result in ext_blake2_256")?; - Ok(()) - } - - ext_keccak_256(data: Pointer, len: WordSize, out: Pointer) { - let result: [u8; 32] = if len == 0 { - keccak_256(&[0u8; 0]) - } else { - let mem = context.read_memory(data, len) - .map_err(|_| "Invalid attempt to get data in ext_keccak_256")?; - keccak_256(&mem) - }; - context.write_memory(out, &result) - .map_err(|_| "Invalid attempt to set result in ext_keccak_256")?; - Ok(()) - } - - ext_ed25519_public_keys(id_data: Pointer, result_len: Pointer) -> Pointer { - let mut id = [0u8; 4]; - context.read_memory_into(id_data, &mut id[..]) - .map_err(|_| "Invalid attempt to get id in ext_ed25519_public_keys")?; - let key_type = KeyTypeId(id); - - let keys = sp_io::crypto::ed25519_public_keys(key_type).encode(); - - let len = keys.len() as u32; - let offset = context.allocate_memory(len)?; - - context.write_memory(offset, keys.as_ref()) - .map_err(|_| "Invalid attempt to set memory in 
ext_ed25519_public_keys")?; - context.write_primitive(result_len, len) - .map_err(|_| "Invalid attempt to write result_len in ext_ed25519_public_keys")?; - - Ok(offset) - } - - ext_ed25519_verify( - msg_data: Pointer, - msg_len: WordSize, - sig_data: Pointer, - pubkey_data: Pointer, - ) -> u32 { - let mut sig = [0u8; 64]; - context.read_memory_into(sig_data, &mut sig[..]) - .map_err(|_| "Invalid attempt to get signature in ext_ed25519_verify")?; - let mut pubkey = [0u8; 32]; - context.read_memory_into(pubkey_data, &mut pubkey[..]) - .map_err(|_| "Invalid attempt to get pubkey in ext_ed25519_verify")?; - let msg = context.read_memory(msg_data, msg_len) - .map_err(|_| "Invalid attempt to get message in ext_ed25519_verify")?; - - Ok(if ed25519::Pair::verify_weak(&sig, &msg, &pubkey) { - 0 - } else { - 1 - }) - } - - ext_ed25519_generate( - id_data: Pointer, - seed: Pointer, - seed_len: WordSize, - out: Pointer, - ) { - let mut id = [0u8; 4]; - context.read_memory_into(id_data, &mut id[..]) - .map_err(|_| "Invalid attempt to get id in ext_ed25519_generate")?; - let key_type = KeyTypeId(id); - - let seed = if seed_len == 0 { - None - } else { - Some( - context.read_memory(seed, seed_len) - .map_err(|_| "Invalid attempt to get seed in ext_ed25519_generate")? - ) - }; - - let pubkey = sp_io::crypto::ed25519_generate(key_type, seed); - - context.write_memory(out, pubkey.as_ref()) - .map_err(|_| "Invalid attempt to set out in ext_ed25519_generate".into()) - } - - ext_ed25519_sign( - id_data: Pointer, - pubkey_data: Pointer, - msg_data: Pointer, - msg_len: WordSize, - out: Pointer, - ) -> u32 { - let mut id = [0u8; 4]; - context.read_memory_into(id_data, &mut id[..]) - .map_err(|_| "Invalid attempt to get id in ext_ed25519_sign")?; - let key_type = KeyTypeId(id); - - let mut pubkey = [0u8; 32]; - context.read_memory_into(pubkey_data, &mut pubkey[..]) - .map_err(|_| "Invalid attempt to get pubkey in ext_ed25519_sign")?; - - let msg = context.read_memory(msg_data, msg_len) - .map_err(|_| "Invalid attempt to get message in ext_ed25519_sign")?; - - let pub_key = ed25519::Public::try_from(pubkey.as_ref()) - .map_err(|_| "Invalid `ed25519` public key")?; - - let signature = sp_io::crypto::ed25519_sign(key_type, &pub_key, &msg); - - match signature { - Some(signature) => { - context.write_memory(out, signature.as_ref()) - .map_err(|_| "Invalid attempt to set out in ext_ed25519_sign")?; - Ok(0) - }, - None => Ok(1), - } - } - - ext_sr25519_public_keys(id_data: Pointer, result_len: Pointer) -> Pointer { - let mut id = [0u8; 4]; - context.read_memory_into(id_data, &mut id[..]) - .map_err(|_| "Invalid attempt to get id in ext_sr25519_public_keys")?; - let key_type = KeyTypeId(id); - - let keys = sp_io::crypto::sr25519_public_keys(key_type).encode(); - - let len = keys.len() as u32; - let offset = context.allocate_memory(len)?; - - context.write_memory(offset, keys.as_ref()) - .map_err(|_| "Invalid attempt to set memory in ext_sr25519_public_keys")?; - context.write_primitive(result_len, len) - .map_err(|_| "Invalid attempt to write result_len in ext_sr25519_public_keys")?; - - Ok(offset) - } - - ext_sr25519_verify( - msg_data: Pointer, - msg_len: WordSize, - sig_data: Pointer, - pubkey_data: Pointer, - ) -> u32 { - let mut sig = [0u8; 64]; - context.read_memory_into(sig_data, &mut sig[..]) - .map_err(|_| "Invalid attempt to get signature in ext_sr25519_verify")?; - let mut pubkey = [0u8; 32]; - context.read_memory_into(pubkey_data, &mut pubkey[..]) - .map_err(|_| "Invalid attempt to get pubkey in 
ext_sr25519_verify")?; - let msg = context.read_memory(msg_data, msg_len) - .map_err(|_| "Invalid attempt to get message in ext_sr25519_verify")?; - - Ok(if sr25519::Pair::verify_weak(&sig, &msg, &pubkey) { - 0 - } else { - 1 - }) - } - - ext_sr25519_generate( - id_data: Pointer, - seed: Pointer, - seed_len: WordSize, - out: Pointer, - ) { - let mut id = [0u8; 4]; - context.read_memory_into(id_data, &mut id[..]) - .map_err(|_| "Invalid attempt to get id in ext_sr25519_generate")?; - let key_type = KeyTypeId(id); - let seed = if seed_len == 0 { - None - } else { - Some( - context.read_memory(seed, seed_len) - .map_err(|_| "Invalid attempt to get seed in ext_sr25519_generate")? - ) - }; - - let pubkey = sp_io::crypto::sr25519_generate(key_type, seed); - - context.write_memory(out, pubkey.as_ref()) - .map_err(|_| "Invalid attempt to set out in ext_sr25519_generate".into()) - } - - ext_sr25519_sign( - id_data: Pointer, - pubkey_data: Pointer, - msg_data: Pointer, - msg_len: WordSize, - out: Pointer, - ) -> u32 { - let mut id = [0u8; 4]; - context.read_memory_into(id_data, &mut id[..]) - .map_err(|_| "Invalid attempt to get id in ext_sr25519_sign")?; - let key_type = KeyTypeId(id); - - let mut pubkey = [0u8; 32]; - context.read_memory_into(pubkey_data, &mut pubkey[..]) - .map_err(|_| "Invalid attempt to get pubkey in ext_sr25519_sign")?; - - let msg = context.read_memory(msg_data, msg_len) - .map_err(|_| "Invalid attempt to get message in ext_sr25519_sign")?; - - let pub_key = sr25519::Public::try_from(pubkey.as_ref()) - .map_err(|_| "Invalid `sr25519` public key")?; - - let signature = sp_io::crypto::sr25519_sign(key_type, &pub_key, &msg); - - match signature { - Some(signature) => { - context.write_memory(out, signature.as_ref()) - .map_err(|_| "Invalid attempt to set out in ext_sr25519_sign")?; - Ok(0) - }, - None => Ok(1), - } - } - - ext_secp256k1_ecdsa_recover( - msg_data: Pointer, - sig_data: Pointer, - pubkey_data: Pointer, - ) -> u32 { - match secp256k1_recover(context, msg_data, sig_data)? { - RecoverResult::Invalid(c) => Ok(c), - RecoverResult::Valid(pubkey) => { - context.write_memory(pubkey_data, &pubkey.serialize()[1..65]) - .map_err(|_| "Invalid attempt to set pubkey in ext_secp256k1_ecdsa_recover")?; - Ok(0) - } - } - } - - ext_secp256k1_ecdsa_recover_compressed( - msg_data: Pointer, - sig_data: Pointer, - pubkey_data: Pointer, - ) -> u32 { - match secp256k1_recover(context, msg_data, sig_data)? 
{ - RecoverResult::Invalid(c) => Ok(c), - RecoverResult::Valid(pubkey) => { - context.write_memory(pubkey_data, &pubkey.serialize_compressed()[..]) - .map_err(|_| "Invalid attempt to set pubkey in ext_secp256k1_ecdsa_recover")?; - Ok(0) - } - } - } - - ext_is_validator() -> u32 { - if sp_io::offchain::is_validator() { Ok(1) } else { Ok(0) } - } - - ext_submit_transaction(msg_data: Pointer, len: WordSize) -> u32 { - let extrinsic = context.read_memory(msg_data, len) - .map_err(|_| "OOB while ext_submit_transaction: wasm")?; - - let res = sp_io::offchain::submit_transaction(extrinsic); - - Ok(if res.is_ok() { 0 } else { 1 }) - } - - ext_network_state(written_out: Pointer) -> Pointer { - let res = sp_io::offchain::network_state(); - - let encoded = res.encode(); - let len = encoded.len() as u32; - let offset = context.allocate_memory(len)?; - context.write_memory(offset, &encoded) - .map_err(|_| "Invalid attempt to set memory in ext_network_state")?; - - context.write_primitive(written_out, len) - .map_err(|_| "Invalid attempt to write written_out in ext_network_state")?; - - Ok(offset) - } - - ext_timestamp() -> u64 { - Ok(sp_io::offchain::timestamp().unix_millis()) - } - - ext_sleep_until(deadline: u64) { - sp_io::offchain::sleep_until(offchain::Timestamp::from_unix_millis(deadline)); - Ok(()) - } - - ext_random_seed(seed_data: Pointer) { - // NOTE the runtime as assumptions about seed size. - let seed = sp_io::offchain::random_seed(); - - context.write_memory(seed_data, &seed) - .map_err(|_| "Invalid attempt to set value in ext_random_seed")?; - Ok(()) - } - - ext_local_storage_set( - kind: u32, - key: Pointer, - key_len: WordSize, - value: Pointer, - value_len: WordSize, - ) { - let kind = offchain::StorageKind::try_from(kind) - .map_err(|_| "storage kind OOB while ext_local_storage_set: wasm")?; - let key = context.read_memory(key, key_len) - .map_err(|_| "OOB while ext_local_storage_set: wasm")?; - let value = context.read_memory(value, value_len) - .map_err(|_| "OOB while ext_local_storage_set: wasm")?; - - sp_io::offchain::local_storage_set(kind, &key, &value); - - Ok(()) - } - - ext_local_storage_get( - kind: u32, - key: Pointer, - key_len: WordSize, - value_len: Pointer, - ) -> Pointer { - let kind = offchain::StorageKind::try_from(kind) - .map_err(|_| "storage kind OOB while ext_local_storage_get: wasm")?; - let key = context.read_memory(key, key_len) - .map_err(|_| "OOB while ext_local_storage_get: wasm")?; - - let maybe_value = sp_io::offchain::local_storage_get(kind, &key); - - let (offset, len) = if let Some(value) = maybe_value { - let offset = context.allocate_memory(value.len() as u32)?; - context.write_memory(offset, &value) - .map_err(|_| "Invalid attempt to set memory in ext_local_storage_get")?; - (offset, value.len() as u32) - } else { - (Pointer::null(), u32::max_value()) - }; - - context.write_primitive(value_len, len) - .map_err(|_| "Invalid attempt to write value_len in ext_local_storage_get")?; - - Ok(offset) - } - - ext_local_storage_compare_and_set( - kind: u32, - key: Pointer, - key_len: WordSize, - old_value: Pointer, - old_value_len: WordSize, - new_value: Pointer, - new_value_len: WordSize, - ) -> u32 { - let kind = offchain::StorageKind::try_from(kind) - .map_err(|_| "storage kind OOB while ext_local_storage_compare_and_set: wasm")?; - let key = context.read_memory(key, key_len) - .map_err(|_| "OOB while ext_local_storage_compare_and_set: wasm")?; - let new_value = context.read_memory(new_value, new_value_len) - .map_err(|_| "OOB while 
ext_local_storage_compare_and_set: wasm")?; - - let old_value = if old_value_len == u32::max_value() { - None - } else { - Some( - context.read_memory(old_value, old_value_len) - .map_err(|_| "OOB while ext_local_storage_compare_and_set: wasm")? - ) - }; - - let res = sp_io::offchain::local_storage_compare_and_set( - kind, - &key, - old_value, - &new_value, - ); - - Ok(if res { 0 } else { 1 }) - } - - ext_http_request_start( - method: Pointer, - method_len: WordSize, - url: Pointer, - url_len: WordSize, - meta: Pointer, - meta_len: WordSize, - ) -> u32 { - let method = context.read_memory(method, method_len) - .map_err(|_| "OOB while ext_http_request_start: wasm")?; - let url = context.read_memory(url, url_len) - .map_err(|_| "OOB while ext_http_request_start: wasm")?; - let meta = context.read_memory(meta, meta_len) - .map_err(|_| "OOB while ext_http_request_start: wasm")?; - - let method_str = str::from_utf8(&method) - .map_err(|_| "invalid str while ext_http_request_start: wasm")?; - let url_str = str::from_utf8(&url) - .map_err(|_| "invalid str while ext_http_request_start: wasm")?; - - let id = sp_io::offchain::http_request_start(method_str, url_str, &meta); - - if let Ok(id) = id { - Ok(id.into()) - } else { - Ok(u32::max_value()) - } - } - - ext_http_request_add_header( - request_id: u32, - name: Pointer, - name_len: WordSize, - value: Pointer, - value_len: WordSize, - ) -> u32 { - let name = context.read_memory(name, name_len) - .map_err(|_| "OOB while ext_http_request_add_header: wasm")?; - let value = context.read_memory(value, value_len) - .map_err(|_| "OOB while ext_http_request_add_header: wasm")?; - - let name_str = str::from_utf8(&name) - .map_err(|_| "Invalid str while ext_http_request_add_header: wasm")?; - let value_str = str::from_utf8(&value) - .map_err(|_| "Invalid str while ext_http_request_add_header: wasm")?; - - let res = sp_io::offchain::http_request_add_header( - offchain::HttpRequestId(request_id as u16), - name_str, - value_str, - ); - - Ok(if res.is_ok() { 0 } else { 1 }) - } - - ext_http_request_write_body( - request_id: u32, - chunk: Pointer, - chunk_len: WordSize, - deadline: u64, - ) -> u32 { - let chunk = context.read_memory(chunk, chunk_len) - .map_err(|_| "OOB while ext_http_request_write_body: wasm")?; - - let res = sp_io::offchain::http_request_write_body( - offchain::HttpRequestId(request_id as u16), - &chunk, - deadline_to_timestamp(deadline), - ); - - Ok(match res { - Ok(()) => 0, - Err(e) => e.into(), - }) - } - - ext_http_response_wait( - ids: Pointer, - ids_len: WordSize, - statuses: Pointer, - deadline: u64, - ) { - let ids = (0..ids_len) - .map(|i| - context.read_primitive(ids.offset(i).ok_or("Point overflow")?) - .map(|id: u32| offchain::HttpRequestId(id as u16)) - .map_err(|_| "OOB while ext_http_response_wait: wasm") - ) - .collect::, _>>()?; - - let res = sp_io::offchain::http_response_wait(&ids, deadline_to_timestamp(deadline)) - .into_iter() - .map(|status| u32::from(status)) - .enumerate() - // make sure to take up to `ids_len` to avoid exceeding the mem. 
- .take(ids_len as usize); - - for (i, status) in res { - context.write_primitive(statuses.offset(i as u32).ok_or("Point overflow")?, status) - .map_err(|_| "Invalid attempt to set memory in ext_http_response_wait")?; - } - - Ok(()) - } - - ext_http_response_headers( - request_id: u32, - written_out: Pointer, - ) -> Pointer { - use codec::Encode; - - let headers = sp_io::offchain::http_response_headers( - offchain::HttpRequestId(request_id as u16), - ); - - let encoded = headers.encode(); - let len = encoded.len() as u32; - let offset = context.allocate_memory(len)?; - - context.write_memory(offset, &encoded) - .map_err(|_| "Invalid attempt to set memory in ext_http_response_headers")?; - context.write_primitive(written_out, len) - .map_err(|_| "Invalid attempt to write written_out in ext_http_response_headers")?; - - Ok(offset) - } - - ext_http_response_read_body( - request_id: u32, - buffer: Pointer, - buffer_len: WordSize, - deadline: u64, - ) -> WordSize { - let mut internal_buffer = Vec::with_capacity(buffer_len as usize); - internal_buffer.resize(buffer_len as usize, 0); - - let res = sp_io::offchain::http_response_read_body( - offchain::HttpRequestId(request_id as u16), - &mut internal_buffer, - deadline_to_timestamp(deadline), - ); - - Ok(match res { - Ok(read) => { - context.write_memory(buffer, &internal_buffer[..read as usize]) - .map_err(|_| "Invalid attempt to set memory in ext_http_response_read_body")?; - - read as u32 - }, - Err(err) => { - u32::max_value() - u32::from(err) + 1 - } - }) - } - } -} - -fn deadline_to_timestamp(deadline: u64) -> Option { - if deadline == 0 { - None - } else { - Some(offchain::Timestamp::from_unix_millis(deadline)) - } -} diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index c0516d3ac7dfaa8b3447faf234834791efb56842..e787b229ec85c40c7c5f0318d9ba3ed6be7b813f 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -19,19 +19,20 @@ mod sandbox; use codec::{Encode, Decode}; use hex_literal::hex; use sp_core::{ - Blake2Hasher, blake2_128, blake2_256, ed25519, sr25519, map, Pair, + blake2_128, blake2_256, ed25519, sr25519, map, Pair, offchain::{OffchainExt, testing}, - traits::Externalities, + traits::{Externalities, CallInWasm}, }; use sc_runtime_test::WASM_BINARY; use sp_state_machine::TestExternalities as CoreTestExternalities; use test_case::test_case; use sp_trie::{TrieConfiguration, trie_types::Layout}; use sp_wasm_interface::HostFunctions as _; +use sp_runtime::traits::BlakeTwo256; use crate::WasmExecutionMethod; -pub type TestExternalities = CoreTestExternalities; +pub type TestExternalities = CoreTestExternalities; type HostFunctions = sp_io::SubstrateHostFunctions; fn call_in_wasm( @@ -39,15 +40,18 @@ fn call_in_wasm( call_data: &[u8], execution_method: WasmExecutionMethod, ext: &mut E, -) -> crate::error::Result> { - crate::call_in_wasm::( +) -> Result, String> { + let executor = crate::WasmExecutor::new( + execution_method, + Some(1024), + HostFunctions::host_functions(), + true, + ); + executor.call_in_wasm( + &WASM_BINARY[..], function, call_data, - execution_method, ext, - &WASM_BINARY[..], - 1024, - true, ) } @@ -83,12 +87,12 @@ fn call_not_existing_function(wasm_method: WasmExecutionMethod) { match wasm_method { WasmExecutionMethod::Interpreted => assert_eq!( &format!("{:?}", e), - "Wasmi(Trap(Trap { kind: Host(Other(\"Function `missing_external` is only a stub. 
Calling a stub is not allowed.\")) }))" + "\"Trap: Trap { kind: Host(Other(\\\"Function `missing_external` is only a stub. Calling a stub is not allowed.\\\")) }\"" ), #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled => assert_eq!( &format!("{:?}", e), - "Other(\"Wasm execution trapped: call to a missing function env:missing_external\")" + "\"Wasm execution trapped: call to a missing function env:missing_external\"" ), } } @@ -112,12 +116,12 @@ fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { match wasm_method { WasmExecutionMethod::Interpreted => assert_eq!( &format!("{:?}", e), - "Wasmi(Trap(Trap { kind: Host(Other(\"Function `yet_another_missing_external` is only a stub. Calling a stub is not allowed.\")) }))" + "\"Trap: Trap { kind: Host(Other(\\\"Function `yet_another_missing_external` is only a stub. Calling a stub is not allowed.\\\")) }\"" ), #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled => assert_eq!( &format!("{:?}", e), - "Other(\"Wasm execution trapped: call to a missing function env:yet_another_missing_external\")" + "\"Wasm execution trapped: call to a missing function env:yet_another_missing_external\"" ), } } @@ -440,7 +444,7 @@ fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { wasm_method, &mut ext.ext(), ).unwrap(), - Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), + Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), ); } @@ -501,29 +505,32 @@ fn offchain_http_should_work(wasm_method: WasmExecutionMethod) { fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { let mut ext = TestExternalities::default(); - crate::call_in_wasm::( + let executor = crate::WasmExecutor::new( + wasm_method, + Some(17), // `17` is the initial number of pages compiled into the binary. + HostFunctions::host_functions(), + true, + ); + executor.call_in_wasm( + &WASM_BINARY[..], "test_exhaust_heap", &[0], - wasm_method, &mut ext.ext(), - &WASM_BINARY[..], - // `17` is the initial number of pages compiled into the binary. - 17, - true, ).unwrap(); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn returns_mutable_static(wasm_method: WasmExecutionMethod) { - let mut instance = crate::wasm_runtime::create_wasm_runtime_with_code( + let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( wasm_method, 1024, &WASM_BINARY[..], HostFunctions::host_functions(), true, - ).expect("Creates instance"); + ).expect("Creates runtime"); + let instance = runtime.new_instance().unwrap(); let res = instance.call("returns_mutable_static", &[0]).unwrap(); assert_eq!(33, u64::decode(&mut &res[..]).unwrap()); @@ -549,13 +556,14 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { // to our allocator algorithm there are inefficiencies. const REQUIRED_MEMORY_PAGES: u64 = 32; - let mut instance = crate::wasm_runtime::create_wasm_runtime_with_code( + let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( wasm_method, REQUIRED_MEMORY_PAGES, &WASM_BINARY[..], HostFunctions::host_functions(), true, - ).expect("Creates instance"); + ).expect("Creates runtime"); + let instance = runtime.new_instance().unwrap(); // On the first invocation we allocate approx. 768KB (75%) of stack and then trap. 
let res = instance.call("allocates_huge_stack_array", &true.encode()); @@ -568,15 +576,16 @@ fn restoration_of_globals(wasm_method: WasmExecutionMethod) { #[test_case(WasmExecutionMethod::Interpreted)] fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { - let mut instance = crate::wasm_runtime::create_wasm_runtime_with_code( + let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( wasm_method, 1024, &WASM_BINARY[..], HostFunctions::host_functions(), true, - ).expect("Creates instance"); + ).expect("Creates runtime"); + let instance = runtime.new_instance().unwrap(); - let heap_base = instance.get_global_val("__heap_base") + let heap_base = instance.get_global_const("__heap_base") .expect("`__heap_base` is valid") .expect("`__heap_base` exists") .as_i32() diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index 152e3a498485dfb310efcc25abd8a0b2eb163e02..1c21026fd8d35c5afb10c0db139399555cd7a4d0 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -29,92 +29,31 @@ #![warn(missing_docs)] #![recursion_limit="128"] -#[macro_use] -mod wasm_utils; #[macro_use] mod native_executor; -pub mod deprecated_host_interface; mod wasm_runtime; #[cfg(test)] mod integration_tests; pub use wasmi; -pub use native_executor::{with_externalities_safe, NativeExecutor, NativeExecutionDispatch}; +pub use native_executor::{with_externalities_safe, NativeExecutor, WasmExecutor, NativeExecutionDispatch}; pub use sp_version::{RuntimeVersion, NativeVersion}; pub use codec::Codec; #[doc(hidden)] -pub use sp_core::traits::Externalities; +pub use sp_core::traits::{Externalities, CallInWasm}; #[doc(hidden)] pub use sp_wasm_interface; pub use wasm_runtime::WasmExecutionMethod; pub use sc_executor_common::{error, sandbox}; -/// Call the given `function` in the given wasm `code`. -/// -/// The signature of `function` needs to follow the default Substrate function signature. -/// -/// - `call_data`: Will be given as input parameters to `function` -/// - `execution_method`: The execution method to use. -/// - `ext`: The externalities that should be set while executing the wasm function. -/// If `None` is given, no externalities will be set. -/// - `heap_pages`: The number of heap pages to allocate. -/// -/// Returns the `Vec` that contains the return value of the function. -pub fn call_in_wasm( - function: &str, - call_data: &[u8], - execution_method: WasmExecutionMethod, - ext: &mut dyn Externalities, - code: &[u8], - heap_pages: u64, - allow_missing_func_imports: bool, -) -> error::Result> { - call_in_wasm_with_host_functions( - function, - call_data, - execution_method, - ext, - code, - heap_pages, - HF::host_functions(), - allow_missing_func_imports, - ) -} - -/// Non-generic version of [`call_in_wasm`] that takes the `host_functions` as parameter. -/// For more information please see [`call_in_wasm`]. -pub fn call_in_wasm_with_host_functions( - function: &str, - call_data: &[u8], - execution_method: WasmExecutionMethod, - ext: &mut dyn Externalities, - code: &[u8], - heap_pages: u64, - host_functions: Vec<&'static dyn sp_wasm_interface::Function>, - allow_missing_func_imports: bool, -) -> error::Result> { - let instance = wasm_runtime::create_wasm_runtime_with_code( - execution_method, - heap_pages, - code, - host_functions, - allow_missing_func_imports, - )?; - - // It is safe, as we delete the instance afterwards. 
- let mut instance = std::panic::AssertUnwindSafe(instance); - - with_externalities_safe(ext, move || instance.call(function, call_data)).and_then(|r| r) -} - /// Provides runtime information. pub trait RuntimeInfo { /// Native runtime information. fn native_version(&self) -> &NativeVersion; /// Extract RuntimeVersion of given :code block - fn runtime_version (&self, ext: &mut E) -> error::Result; + fn runtime_version(&self, ext: &mut dyn Externalities) -> error::Result; } #[cfg(test)] @@ -122,19 +61,25 @@ mod tests { use super::*; use sc_runtime_test::WASM_BINARY; use sp_io::TestExternalities; + use sp_wasm_interface::HostFunctions; + use sp_core::traits::CallInWasm; #[test] fn call_in_interpreted_wasm_works() { let mut ext = TestExternalities::default(); let mut ext = ext.ext(); - let res = call_in_wasm::( + + let executor = WasmExecutor::new( + WasmExecutionMethod::Interpreted, + Some(8), + sp_io::SubstrateHostFunctions::host_functions(), + true, + ); + let res = executor.call_in_wasm( + &WASM_BINARY[..], "test_empty_return", &[], - WasmExecutionMethod::Interpreted, &mut ext, - &WASM_BINARY, - 8, - true, ).unwrap(); assert_eq!(res, vec![0u8; 0]); } diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 4fe7a205f53f9eec793c33cea0fe7ca452ff5f9d..dfc88d2ede746d73e339481419316d077a9bdae3 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -16,19 +16,15 @@ use crate::{ RuntimeInfo, error::{Error, Result}, - wasm_runtime::{RuntimesCache, WasmExecutionMethod}, + wasm_runtime::{RuntimeCache, WasmExecutionMethod, CodeSource}, }; use sp_version::{NativeVersion, RuntimeVersion}; use codec::{Decode, Encode}; use sp_core::{NativeOrEncoded, traits::{CodeExecutor, Externalities}}; use log::trace; -use std::{result, cell::RefCell, panic::{UnwindSafe, AssertUnwindSafe}, sync::Arc}; +use std::{result, panic::{UnwindSafe, AssertUnwindSafe}, sync::Arc}; use sp_wasm_interface::{HostFunctions, Function}; -use sc_executor_common::wasm_runtime::WasmRuntime; - -thread_local! { - static RUNTIMES_CACHE: RefCell = RefCell::new(RuntimesCache::new()); -} +use sc_executor_common::wasm_runtime::WasmInstance; /// Default num of pages for the heap const DEFAULT_HEAP_PAGES: u64 = 1024; @@ -75,46 +71,43 @@ pub trait NativeExecutionDispatch: Send + Sync { fn native_version() -> NativeVersion; } -/// A generic `CodeExecutor` implementation that uses a delegate to determine wasm code equivalence -/// and dispatch to native code when possible, falling back on `WasmExecutor` when not. -pub struct NativeExecutor { - /// Dummy field to avoid the compiler complaining about us not using `D`. - _dummy: std::marker::PhantomData, +/// An abstraction over Wasm code executor. Supports selecting execution backend and +/// manages runtime cache. +#[derive(Clone)] +pub struct WasmExecutor { /// Method used to execute fallback Wasm code. - fallback_method: WasmExecutionMethod, - /// Native runtime version info. - native_version: NativeVersion, + method: WasmExecutionMethod, /// The number of 64KB pages to allocate for Wasm execution. default_heap_pages: u64, /// The host functions registered with this instance. host_functions: Arc>, + /// WASM runtime cache. + cache: Arc, + /// Allow missing function imports. + allow_missing_func_imports: bool, } -impl NativeExecutor { +impl WasmExecutor { /// Create new instance. /// /// # Parameters /// - /// `fallback_method` - Method used to execute fallback Wasm code. 
+ /// `method` - Method used to execute Wasm code. /// /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. - pub fn new(fallback_method: WasmExecutionMethod, default_heap_pages: Option) -> Self { - let mut host_functions = sp_io::SubstrateHostFunctions::host_functions(); - // Add the old and deprecated host functions as well, so that we support old wasm runtimes. - host_functions.extend( - crate::deprecated_host_interface::SubstrateExternals::host_functions(), - ); - - // Add the custom host functions provided by the user. - host_functions.extend(D::ExtendHostFunctions::host_functions()); - - NativeExecutor { - _dummy: Default::default(), - fallback_method, - native_version: D::native_version(), + pub fn new( + method: WasmExecutionMethod, + default_heap_pages: Option, + host_functions: Vec<&'static dyn Function>, + allow_missing_func_imports: bool, + ) -> Self { + WasmExecutor { + method, default_heap_pages: default_heap_pages.unwrap_or(DEFAULT_HEAP_PAGES), host_functions: Arc::new(host_functions), + cache: Arc::new(RuntimeCache::new()), + allow_missing_func_imports, } } @@ -131,46 +124,90 @@ impl NativeExecutor { /// runtime is invalidated on any `panic!` to prevent a poisoned state. `ext` is already /// implicitly handled as unwind safe, as we store it in a global variable while executing the /// native runtime. - fn with_runtime( + fn with_instance<'c, R, F>( &self, - ext: &mut E, - f: impl for<'a> FnOnce( - AssertUnwindSafe<&'a mut (dyn WasmRuntime + 'static)>, - &'a RuntimeVersion, - AssertUnwindSafe<&'a mut E>, + code: CodeSource<'c>, + ext: &mut dyn Externalities, + f: F, + ) -> Result + where F: FnOnce( + AssertUnwindSafe<&dyn WasmInstance>, + Option<&RuntimeVersion>, + AssertUnwindSafe<&mut dyn Externalities>, ) -> Result>, - ) -> Result where E: Externalities { - RUNTIMES_CACHE.with(|cache| { - let mut cache = cache.borrow_mut(); - let (runtime, version, code_hash) = cache.fetch_runtime( - ext, - self.fallback_method, - self.default_heap_pages, - &*self.host_functions, - )?; - - let runtime = AssertUnwindSafe(runtime); - let ext = AssertUnwindSafe(ext); - - match f(runtime, version, ext) { - Ok(res) => res, - Err(e) => { - cache.invalidate_runtime(self.fallback_method, code_hash); - Err(e) - } + { + match self.cache.with_instance( + code, + ext, + self.method, + self.default_heap_pages, + &*self.host_functions, + self.allow_missing_func_imports, + |instance, version, ext| { + let instance = AssertUnwindSafe(instance); + let ext = AssertUnwindSafe(ext); + f(instance, version, ext) } - }) + )? { + Ok(r) => r, + Err(e) => Err(e), + } } } -impl Clone for NativeExecutor { - fn clone(&self) -> Self { +impl sp_core::traits::CallInWasm for WasmExecutor { + fn call_in_wasm( + &self, + wasm_blob: &[u8], + method: &str, + call_data: &[u8], + ext: &mut dyn Externalities, + ) -> std::result::Result, String> { + self.with_instance(CodeSource::Custom(wasm_blob), ext, |instance, _, mut ext| { + with_externalities_safe( + &mut **ext, + move || instance.call(method, call_data), + ) + }).map_err(|e| e.to_string()) + } +} + +/// A generic `CodeExecutor` implementation that uses a delegate to determine wasm code equivalence +/// and dispatch to native code when possible, falling back on `WasmExecutor` when not. +pub struct NativeExecutor { + /// Dummy field to avoid the compiler complaining about us not using `D`. + _dummy: std::marker::PhantomData, + /// Native runtime version info. 
+ native_version: NativeVersion, + /// Fallback wasm executor. + wasm: WasmExecutor, +} + +impl NativeExecutor { + /// Create new instance. + /// + /// # Parameters + /// + /// `fallback_method` - Method used to execute fallback Wasm code. + /// + /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. + /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + pub fn new(fallback_method: WasmExecutionMethod, default_heap_pages: Option) -> Self { + let mut host_functions = sp_io::SubstrateHostFunctions::host_functions(); + + // Add the custom host functions provided by the user. + host_functions.extend(D::ExtendHostFunctions::host_functions()); + let wasm_executor = WasmExecutor::new( + fallback_method, + default_heap_pages, + host_functions, + false, + ); + NativeExecutor { _dummy: Default::default(), - fallback_method: self.fallback_method, native_version: D::native_version(), - default_heap_pages: self.default_heap_pages, - host_functions: self.host_functions.clone(), + wasm: wasm_executor, } } } @@ -180,90 +217,109 @@ impl RuntimeInfo for NativeExecutor { &self.native_version } - fn runtime_version( + fn runtime_version( &self, - ext: &mut E, + ext: &mut dyn Externalities, ) -> Result { - self.with_runtime(ext, |_runtime, version, _ext| Ok(Ok(version.clone()))) + self.wasm.with_instance(CodeSource::Externalities, ext, + |_instance, version, _ext| + Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) + ) } } impl CodeExecutor for NativeExecutor { type Error = Error; - fn call - < - E: Externalities, + fn call< R: Decode + Encode + PartialEq, NC: FnOnce() -> result::Result + UnwindSafe, >( &self, - ext: &mut E, + ext: &mut dyn Externalities, method: &str, data: &[u8], use_native: bool, native_call: Option, - ) -> (Result>, bool){ + ) -> (Result>, bool) { let mut used_native = false; - let result = self.with_runtime(ext, |mut runtime, onchain_version, mut ext| { - match ( - use_native, - onchain_version.can_call_with(&self.native_version.runtime_version), - native_call, - ) { - (_, false, _) => { - trace!( - target: "executor", - "Request for native execution failed (native: {}, chain: {})", - self.native_version.runtime_version, - onchain_version, - ); - - with_externalities_safe( - &mut **ext, - move || runtime.call(method, data).map(NativeOrEncoded::Encoded) - ) - } - (false, _, _) => { - with_externalities_safe( - &mut **ext, - move || runtime.call(method, data).map(NativeOrEncoded::Encoded) - ) - }, - (true, true, Some(call)) => { - trace!( - target: "executor", - "Request for native execution with native call succeeded (native: {}, chain: {}).", - self.native_version.runtime_version, - onchain_version, - ); - - used_native = true; - let res = with_externalities_safe(&mut **ext, move || (call)()) - .and_then(|r| r - .map(NativeOrEncoded::Native) - .map_err(|s| Error::ApiError(s.to_string())) + let result = self.wasm.with_instance( + CodeSource::Externalities, + ext, + |instance, onchain_version, mut ext| { + let onchain_version = onchain_version.ok_or_else( + || Error::ApiError("Unknown version".into()) + )?; + match ( + use_native, + onchain_version.can_call_with(&self.native_version.runtime_version), + native_call, + ) { + (_, false, _) => { + trace!( + target: "executor", + "Request for native execution failed (native: {}, chain: {})", + self.native_version.runtime_version, + onchain_version, ); - Ok(res) - } - _ => { - trace!( - target: "executor", - "Request for native execution succeeded (native: {}, chain: {})", - 
self.native_version.runtime_version, - onchain_version - ); - - used_native = true; - Ok(D::dispatch(&mut **ext, method, data).map(NativeOrEncoded::Encoded)) + with_externalities_safe( + &mut **ext, + move || instance.call(method, data).map(NativeOrEncoded::Encoded) + ) + } + (false, _, _) => { + with_externalities_safe( + &mut **ext, + move || instance.call(method, data).map(NativeOrEncoded::Encoded) + ) + }, + (true, true, Some(call)) => { + trace!( + target: "executor", + "Request for native execution with native call succeeded \ + (native: {}, chain: {}).", + self.native_version.runtime_version, + onchain_version, + ); + + used_native = true; + let res = with_externalities_safe(&mut **ext, move || (call)()) + .and_then(|r| r + .map(NativeOrEncoded::Native) + .map_err(|s| Error::ApiError(s.to_string())) + ); + + Ok(res) + } + _ => { + trace!( + target: "executor", + "Request for native execution succeeded (native: {}, chain: {})", + self.native_version.runtime_version, + onchain_version + ); + + used_native = true; + Ok(D::dispatch(&mut **ext, method, data).map(NativeOrEncoded::Encoded)) + } } } - }); + ); (result, used_native) } } +impl Clone for NativeExecutor { + fn clone(&self) -> Self { + NativeExecutor { + _dummy: Default::default(), + native_version: D::native_version(), + wasm: self.wasm.clone(), + } + } +} + impl sp_core::traits::CallInWasm for NativeExecutor { fn call_in_wasm( &self, @@ -272,16 +328,7 @@ impl sp_core::traits::CallInWasm for NativeExecutor< call_data: &[u8], ext: &mut dyn Externalities, ) -> std::result::Result, String> { - crate::call_in_wasm_with_host_functions( - method, - call_data, - self.fallback_method, - ext, - wasm_blob, - self.default_heap_pages, - (*self.host_functions).clone(), - false, - ).map_err(|e| e.to_string()) + sp_core::traits::CallInWasm::call_in_wasm(&self.wasm, wasm_blob, method, call_data, ext) } } @@ -383,7 +430,7 @@ mod tests { let executor = NativeExecutor::::new(WasmExecutionMethod::Interpreted, None); my_interface::HostFunctions::host_functions().iter().for_each(|function| { assert_eq!( - executor.host_functions.iter().filter(|f| f == &function).count(), + executor.wasm.host_functions.iter().filter(|f| f == &function).count(), 2, ); }); diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 9d54246ee07638562fd7ae95ab2cbd0e9efb808a..180baf0a2f8089cfd0d093ca4cf4a2bb835e0ea0 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -19,13 +19,15 @@ //! The primary means of accessing the runtimes is through a cache which saves the reusable //! components of the runtime that are expensive to initialize. +use std::sync::Arc; +use std::borrow::Cow; use crate::error::{Error, WasmError}; -use log::{trace, warn}; +use parking_lot::{Mutex, RwLock}; use codec::Decode; use sp_core::{storage::well_known_keys, traits::Externalities}; use sp_version::RuntimeVersion; -use std::{collections::hash_map::{Entry, HashMap}, panic::AssertUnwindSafe}; -use sc_executor_common::wasm_runtime::WasmRuntime; +use std::panic::AssertUnwindSafe; +use sc_executor_common::wasm_runtime::{WasmModule, WasmInstance}; use sp_wasm_interface::Function; @@ -39,15 +41,33 @@ pub enum WasmExecutionMethod { Compiled, } +/// Executoed code origin. +pub enum CodeSource<'a> { + /// Take code from storage, + Externalities, + /// Use provided code, + Custom(&'a [u8]), +} + /// A Wasm runtime object along with its cached runtime version. struct VersionedRuntime { - runtime: Box, + /// Runtime code hash. 
+ code_hash: Vec, + /// Wasm runtime type. + wasm_method: WasmExecutionMethod, + /// Shared runtime that can spawn instances. + module: Box, /// The number of WebAssembly heap pages this instance was created with. heap_pages: u64, - /// Runtime version according to `Core_version`. - version: RuntimeVersion, + /// Runtime version according to `Core_version` if any. + version: Option, + /// Cached instance pool. + instances: RwLock<[Option>>>; MAX_INSTANCES]>, } +const MAX_RUNTIMES: usize = 2; +const MAX_INSTANCES: usize = 8; + /// Cache for the runtimes. /// /// When an instance is requested for the first time it is added to this cache. Metadata is kept @@ -60,130 +80,184 @@ struct VersionedRuntime { /// /// For now the cache grows indefinitely, but that should be fine for now since runtimes can only be /// upgraded rarely and there are no other ways to make the node to execute some other runtime. -pub struct RuntimesCache { - /// A cache of runtime instances along with metadata, ready to be reused. +pub struct RuntimeCache { + /// A cache of runtimes along with metadata. /// - /// Instances are keyed by the Wasm execution method and the hash of their code. - instances: HashMap<(WasmExecutionMethod, Vec), Result>, + /// Runtimes sorted by recent usage. The most recently used is at the front. + runtimes: Mutex<[Option>; MAX_RUNTIMES]>, } -impl RuntimesCache { +impl RuntimeCache { /// Creates a new instance of a runtimes cache. - pub fn new() -> RuntimesCache { - RuntimesCache { - instances: HashMap::new(), + pub fn new() -> RuntimeCache { + RuntimeCache { + runtimes: Default::default(), } } - /// Fetches an instance of the runtime. - /// - /// On first use we create a new runtime instance, save it to the cache - /// and persist its initial memory. - /// - /// Each subsequent request will return this instance, with its memory restored - /// to the persisted initial memory. Thus, we reuse one single runtime instance - /// for every `fetch_runtime` invocation. + /// Prepares a WASM module instance and executes given function for it. /// + /// This uses internal cache to find avaiable instance or create a new one. /// # Parameters /// + /// `code` - Provides external code or tells the executor to fetch it from storage. + /// /// `ext` - Externalities to use for the runtime. This is used for setting /// up an initial runtime instance. /// /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. /// + /// `wasm_method` - Type of WASM backend to use. + /// /// `host_functions` - The host functions that should be registered for the Wasm runtime. /// - /// # Return value + /// `allow_missing_func_imports` - Ignore missing function imports. /// - /// If no error occurred a tuple `(&mut WasmRuntime, H256)` is - /// returned. `H256` is the hash of the runtime code. + /// `f` - Function to execute. /// + /// # Returns result of `f` wrapped in an additonal result. /// In case of failure one of two errors can be returned: /// /// `Err::InvalidCode` is returned for runtime code issues. /// /// `Error::InvalidMemoryReference` is returned if no memory export with the /// identifier `memory` can be found in the runtime. 
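 /// # Example
 ///
 /// A minimal sketch of driving `with_instance` directly (this mirrors how the
 /// `WasmExecutor` above uses the cache; `cache`, `ext`, `wasm_blob` and
 /// `host_functions` are assumed to be in scope):
 ///
 /// ```nocompile
 /// let call_result = cache.with_instance(
 ///     CodeSource::Custom(&wasm_blob[..]),
 ///     &mut ext,
 ///     WasmExecutionMethod::Interpreted,
 ///     1024,   // default heap pages
 ///     &host_functions,
 ///     false,  // allow_missing_func_imports
 ///     |instance, version, _ext| {
 ///         // `version` is `None` when the runtime does not expose `Core_version`.
 ///         instance.call("Core_version", &[])
 ///     },
 /// )?;
 /// // `call_result` is the closure's own `Result`, returned unchanged.
 /// ```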
- pub fn fetch_runtime( - &mut self, - ext: &mut E, + pub fn with_instance<'c, R, F>( + &self, + code: CodeSource<'c>, + ext: &mut dyn Externalities, wasm_method: WasmExecutionMethod, default_heap_pages: u64, host_functions: &[&'static dyn Function], - ) -> Result<(&mut (dyn WasmRuntime + 'static), &RuntimeVersion, Vec), Error> { - let code_hash = ext - .original_storage_hash(well_known_keys::CODE) - .ok_or(Error::InvalidCode("`CODE` not found in storage.".into()))?; - - let heap_pages = ext - .storage(well_known_keys::HEAP_PAGES) - .and_then(|pages| u64::decode(&mut &pages[..]).ok()) - .unwrap_or(default_heap_pages); + allow_missing_func_imports: bool, + f: F, + ) -> Result, Error> + where F: FnOnce( + &dyn WasmInstance, + Option<&RuntimeVersion>, + &mut dyn Externalities) + -> Result, + { + let (code_hash, heap_pages) = match &code { + CodeSource::Externalities => { + ( + ext + .original_storage_hash(well_known_keys::CODE) + .ok_or(Error::InvalidCode("`CODE` not found in storage.".into()))?, + ext + .storage(well_known_keys::HEAP_PAGES) + .and_then(|pages| u64::decode(&mut &pages[..]).ok()) + .unwrap_or(default_heap_pages), + ) + }, + CodeSource::Custom(code) => { + (sp_core::blake2_256(code).to_vec(), default_heap_pages) + } + }; - let result = match self.instances.entry((wasm_method, code_hash.clone())) { - Entry::Occupied(o) => { - let result = o.into_mut(); - if let Ok(ref mut cached_runtime) = result { - let heap_pages_changed = cached_runtime.heap_pages != heap_pages; - let host_functions_changed = cached_runtime.runtime.host_functions() - != host_functions; - if heap_pages_changed || host_functions_changed { - let changed = if heap_pages_changed { - "heap_pages" - } else { - "host functions" - }; + let mut runtimes = self.runtimes.lock(); // this must be released prior to calling f + let pos = runtimes.iter().position(|r| r.as_ref().map_or( + false, + |r| r.wasm_method == wasm_method && + r.code_hash == code_hash && + r.heap_pages == heap_pages + )); - trace!( - target: "runtimes_cache", - "{} were changed. Reinstantiating the instance", - changed, - ); - *result = create_versioned_wasm_runtime( - ext, - wasm_method, - heap_pages, - host_functions.into(), - ); - if let Err(ref err) = result { - warn!(target: "runtimes_cache", "cannot create a runtime: {:?}", err); - } + let runtime = match pos { + Some(n) => runtimes[n] + .clone() + .expect("`position` only returns `Some` for entries that are `Some`"), + None => { + let code = match code { + CodeSource::Externalities => { + Cow::Owned(ext.original_storage(well_known_keys::CODE) + .ok_or(WasmError::CodeNotFound)?) } - } - result - }, - Entry::Vacant(v) => { - trace!(target: "runtimes_cache", "no instance found in cache, creating now."); + CodeSource::Custom(code) => { + Cow::Borrowed(code) + } + }; + let result = create_versioned_wasm_runtime( + &code, + code_hash, ext, wasm_method, heap_pages, host_functions.into(), + allow_missing_func_imports, ); if let Err(ref err) = result { - warn!(target: "runtimes_cache", "cannot create a runtime: {:?}", err); + log::warn!(target: "wasm-runtime", "Cannot create a runtime: {:?}", err); } - v.insert(result) + Arc::new(result?) } }; - result.as_mut() - .map(|entry| (entry.runtime.as_mut(), &entry.version, code_hash)) - .map_err(|ref e| Error::InvalidCode(format!("{:?}", e))) - } + // Rearrange runtimes by last recently used. + match pos { + Some(0) => {}, + Some(n) => { + for i in (1 .. 
n + 1).rev() { + runtimes.swap(i, i - 1); + } + } + None => { + runtimes[MAX_RUNTIMES-1] = Some(runtime.clone()); + for i in (1 .. MAX_RUNTIMES).rev() { + runtimes.swap(i, i - 1); + } + } + } + drop(runtimes); - /// Invalidate the runtime for the given `wasm_method` and `code_hash`. - /// - /// Invalidation of a runtime is useful when there was a `panic!` in native while executing it. - /// The `panic!` maybe have brought the runtime into a poisoned state and so, it is better to - /// invalidate this runtime instance. - pub fn invalidate_runtime( - &mut self, - wasm_method: WasmExecutionMethod, - code_hash: Vec, - ) { - // Just remove the instance, it will be re-created the next time it is requested. - self.instances.remove(&(wasm_method, code_hash)); + let result = { + // Find a free instance + let instance_pool = runtime.instances.read().clone(); + let instance = instance_pool + .iter() + .find_map(|i| i.as_ref().and_then(|i| i.try_lock())); + if let Some(mut locked) = instance { + let result = f(&**locked, runtime.version.as_ref(), ext); + if let Err(e) = &result { + log::warn!(target: "wasm-runtime", "Evicting failed runtime instance: {:?}", e); + *locked = runtime.module.new_instance()?; + } + result + } else { + // Allocate a new instance + let instance = runtime.module.new_instance()?; + + let result = f(&*instance, runtime.version.as_ref(), ext); + match &result { + Ok(_) => { + let mut instance_pool = runtime.instances.write(); + if let Some(ref mut slot) = instance_pool.iter_mut().find(|s| s.is_none()) { + **slot = Some(Arc::new(Mutex::new(instance))); + log::debug!( + target: "wasm-runtime", + "Allocated WASM instance {}/{}", + instance_pool.len(), + MAX_INSTANCES, + ); + } else { + log::warn!(target: "wasm-runtime", "Ran out of free WASM instances"); + } + } + Err(e) => { + log::warn!( + target: + "wasm-runtime", + "Fresh runtime instance failed with {:?}", + e, + ); + } + } + result + } + }; + + Ok(result) } } @@ -194,28 +268,43 @@ pub fn create_wasm_runtime_with_code( code: &[u8], host_functions: Vec<&'static dyn Function>, allow_missing_func_imports: bool, -) -> Result, WasmError> { +) -> Result, WasmError> { match wasm_method { WasmExecutionMethod::Interpreted => - sc_executor_wasmi::create_instance(code, heap_pages, host_functions, allow_missing_func_imports) - .map(|runtime| -> Box { Box::new(runtime) }), + sc_executor_wasmi::create_runtime( + code, + heap_pages, + host_functions, + allow_missing_func_imports + ).map(|runtime| -> Box { Box::new(runtime) }), #[cfg(feature = "wasmtime")] WasmExecutionMethod::Compiled => - sc_executor_wasmtime::create_instance(code, heap_pages, host_functions, allow_missing_func_imports) - .map(|runtime| -> Box { Box::new(runtime) }), + sc_executor_wasmtime::create_runtime( + code, + heap_pages, + host_functions, + allow_missing_func_imports + ).map(|runtime| -> Box { Box::new(runtime) }), } } -fn create_versioned_wasm_runtime( - ext: &mut E, +fn create_versioned_wasm_runtime( + code: &[u8], + code_hash: Vec, + ext: &mut dyn Externalities, wasm_method: WasmExecutionMethod, heap_pages: u64, host_functions: Vec<&'static dyn Function>, + allow_missing_func_imports: bool, ) -> Result { - let code = ext - .original_storage(well_known_keys::CODE) - .ok_or(WasmError::CodeNotFound)?; - let mut runtime = create_wasm_runtime_with_code(wasm_method, heap_pages, &code, host_functions, false)?; + let time = std::time::Instant::now(); + let mut runtime = create_wasm_runtime_with_code( + wasm_method, + heap_pages, + &code, + host_functions, + 
allow_missing_func_imports, + )?; // Call to determine runtime version. let version_result = { @@ -224,21 +313,33 @@ fn create_versioned_wasm_runtime( // The following unwind safety assertion is OK because if the method call panics, the // runtime will be dropped. - let mut runtime = AssertUnwindSafe(runtime.as_mut()); + let runtime = AssertUnwindSafe(runtime.as_mut()); crate::native_executor::with_externalities_safe( &mut **ext, - move || runtime.call("Core_version", &[]) + move || runtime.new_instance()?.call("Core_version", &[]) ).map_err(|_| WasmError::Instantiation("panic in call to get runtime version".into()))? }; - let encoded_version = version_result - .map_err(|e| WasmError::Instantiation(format!("failed to call \"Core_version\": {}", e)))?; - let version = RuntimeVersion::decode(&mut encoded_version.as_slice()) - .map_err(|_| WasmError::Instantiation("failed to decode \"Core_version\" result".into()))?; + let version = match version_result { + Ok(version) => Some(RuntimeVersion::decode(&mut version.as_slice()) + .map_err(|_| + WasmError::Instantiation("failed to decode \"Core_version\" result".into()) + )?), + Err(_) => None, + }; + log::debug!( + target: "wasm-runtime", + "Prepared new runtime version {:?} in {} ms.", + version, + time.elapsed().as_millis(), + ); Ok(VersionedRuntime { - runtime, + code_hash, + module: runtime, version, heap_pages, + wasm_method, + instances: Default::default(), }) } diff --git a/client/executor/src/wasm_utils.rs b/client/executor/src/wasm_utils.rs deleted file mode 100644 index 539e210a9467e0825d146637aaef0b4608722392..0000000000000000000000000000000000000000 --- a/client/executor/src/wasm_utils.rs +++ /dev/null @@ -1,173 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Utilities for defining the wasm host environment. - -/// Converts arguments into respective WASM types. -#[macro_export] -macro_rules! convert_args { - () => ([]); - ( $( $t:ty ),* ) => ( [ $( <$t as $crate::sp_wasm_interface::IntoValue>::VALUE_TYPE, )* ] ); -} - -/// Generates a WASM signature for given list of parameters. -#[macro_export] -macro_rules! gen_signature { - ( ( $( $params: ty ),* ) ) => ( - $crate::sp_wasm_interface::Signature { - args: std::borrow::Cow::Borrowed(&convert_args!( $( $params ),* )[..]), - return_value: None, - } - ); - ( ( $( $params: ty ),* ) -> $returns:ty ) => ( - $crate::sp_wasm_interface::Signature { - args: std::borrow::Cow::Borrowed(&convert_args!( $( $params ),* )[..]), - return_value: Some(<$returns as $crate::sp_wasm_interface::IntoValue>::VALUE_TYPE), - } - ); -} - -macro_rules! gen_functions { - (@INTERNAL - { $( $generated:tt )* } - $context:ident, - ) => ( - vec![ $( $generated )* ] - ); - (@INTERNAL - { $( $generated:tt )* } - $context:ident, - $name:ident ( $( $names:ident: $params:ty ),* ) $( -> $returns:ty )? { $( $body:tt )* } - $( $tail:tt )* - ) => ( - gen_functions! 
{ - @INTERNAL - { - $( $generated )* - { - struct $name; - - #[allow(unused)] - impl $crate::sp_wasm_interface::Function for $name { - fn name(&self) -> &str { - stringify!($name) - } - fn signature(&self) -> $crate::sp_wasm_interface::Signature { - gen_signature!( ( $( $params ),* ) $( -> $returns )? ) - } - fn execute( - &self, - context: &mut dyn $crate::sp_wasm_interface::FunctionContext, - args: &mut dyn Iterator, - ) -> ::std::result::Result, String> { - let mut $context = context; - marshall! { - args, - ( $( $names : $params ),* ) $( -> $returns )? => { $( $body )* } - } - } - } - - &$name as &dyn $crate::sp_wasm_interface::Function - }, - } - $context, - $( $tail )* - } - ); - - ( $context:ident, $( $tail:tt )* ) => ( - gen_functions!(@INTERNAL {} $context, $($tail)*); - ); -} - -/// Converts the list of arguments coming from WASM into their native types. -#[macro_export] -macro_rules! unmarshall_args { - ( $body:tt, $args_iter:ident, $( $names:ident : $params:ty ),*) => ({ - $( - let $names : $params = - $args_iter.next() - .and_then(|val| <$params as $crate::sp_wasm_interface::TryFromValue>::try_from_value(val)) - .expect( - "`$args_iter` comes from an argument of Externals::execute_function; - args to an external call always matches the signature of the external; - external signatures are built with count and types and in order defined by `$params`; - here, we iterating on `$params`; - qed; - " - ); - )* - $body - }) -} - -/// Since we can't specify the type of closure directly at binding site: -/// -/// ```nocompile -/// let f: FnOnce() -> Result<::NativeType, _> = || { /* ... */ }; -/// ``` -/// -/// we use this function to constrain the type of the closure. -#[inline(always)] -pub fn constrain_closure(f: F) -> F -where - F: FnOnce() -> Result -{ - f -} - -/// Pass the list of parameters by converting them to respective WASM types. -#[macro_export] -macro_rules! marshall { - ( $args_iter:ident, ( $( $names:ident : $params:ty ),* ) -> $returns:ty => $body:tt ) => ({ - let body = $crate::wasm_utils::constrain_closure::<$returns, _>(|| { - unmarshall_args!($body, $args_iter, $( $names : $params ),*) - }); - let r = body()?; - return Ok(Some($crate::sp_wasm_interface::IntoValue::into_value(r))) - }); - ( $args_iter:ident, ( $( $names:ident : $params:ty ),* ) => $body:tt ) => ({ - let body = $crate::wasm_utils::constrain_closure::<(), _>(|| { - unmarshall_args!($body, $args_iter, $( $names : $params ),*) - }); - body()?; - return Ok(None) - }) -} - -/// Implements the wasm host interface for the given type. -#[macro_export] -macro_rules! impl_wasm_host_interface { - ( - impl $interface_name:ident where $context:ident { - $( - $name:ident($( $names:ident : $params:ty ),* $(,)? ) $( -> $returns:ty )? - { $( $body:tt )* } - )* - } - ) => ( - impl $crate::sp_wasm_interface::HostFunctions for $interface_name { - #[allow(non_camel_case_types)] - fn host_functions() -> Vec<&'static dyn $crate::sp_wasm_interface::Function> { - gen_functions!( - $context, - $( $name( $( $names: $params ),* ) $( -> $returns )? 
{ $( $body )* } )* - ) - } - } - ); -} diff --git a/client/executor/wasmi/Cargo.toml b/client/executor/wasmi/Cargo.toml index 9e968fdc685f0896abb3b7593fc425e2e5af6c17..aa259e29f8dfb3dfb564b3b8060af8f238f537e6 100644 --- a/client/executor/wasmi/Cargo.toml +++ b/client/executor/wasmi/Cargo.toml @@ -1,17 +1,21 @@ [package] name = "sc-executor-wasmi" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "This crate provides an implementation of `WasmRuntime` that is baked by wasmi." +documentation = "https://docs.rs/sc-execturo-wasmi" [dependencies] log = "0.4.8" wasmi = "0.6.2" parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.0.0" } -sc-executor-common = { version = "0.8", path = "../common" } -sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sc-executor-common = { version = "0.8.0-alpha.2", path = "../common" } +sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-alpha.2", path = "../../../primitives/allocator" } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index a0e11dfcf8b93f4799624688e84c8e109f98ca9d..6348c2413357f03f9de161d4ff537cf8a77e818d 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -14,10 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! This crate provides an implementation of `WasmRuntime` that is baked by wasmi. +//! This crate provides an implementation of `WasmModule` that is baked by wasmi. use sc_executor_common::{error::{Error, WasmError}, sandbox}; -use std::{str, mem, cell::RefCell}; +use std::{str, mem, cell::RefCell, sync::Arc}; use wasmi::{ Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, ModuleRef, memory_units::Pages, RuntimeValue::{I32, I64, self}, @@ -30,7 +30,7 @@ use sp_wasm_interface::{ FunctionContext, Pointer, WordSize, Sandbox, MemoryId, Result as WResult, Function, }; use sp_runtime_interface::unpack_ptr_and_len; -use sc_executor_common::wasm_runtime::WasmRuntime; +use sc_executor_common::wasm_runtime::{WasmModule, WasmInstance}; struct FunctionExecutor<'a> { sandbox_store: sandbox::Store, @@ -623,9 +623,77 @@ impl StateSnapshot { } } -/// A runtime along with its initial state snapshot. -#[derive(Clone)] +/// A runtime along with initial copy of data segments. pub struct WasmiRuntime { + /// A wasm module. + module: Module, + /// The host functions registered for this instance. + host_functions: Arc>, + /// Enable stub generation for functions that are not available in `host_functions`. + /// These stubs will error when the wasm blob tries to call them. + allow_missing_func_imports: bool, + /// Numer of heap pages this runtime uses. 
+ heap_pages: u64, + /// Data segments created for each new instance. + data_segments: Vec, +} + +impl WasmModule for WasmiRuntime { + fn new_instance(&self) -> Result, Error> { + // Instantiate this module. + let (instance, missing_functions, memory) = instantiate_module( + self.heap_pages as usize, + &self.module, + &self.host_functions, + self.allow_missing_func_imports, + ).map_err(|e| WasmError::Instantiation(e.to_string()))?; + + // Take state snapshot before executing anything. + let state_snapshot = StateSnapshot::take(&instance, self.data_segments.clone()) + .expect( + "`take` returns `Err` if the module is not valid; + we already loaded module above, thus the `Module` is proven to be valid at this point; + qed + ", + ); + + Ok(Box::new(WasmiInstance { + instance, + memory, + state_snapshot, + host_functions: self.host_functions.clone(), + allow_missing_func_imports: self.allow_missing_func_imports, + missing_functions, + })) + } +} + +/// Create a new `WasmiRuntime` given the code. This function loads the module and +/// stores it in the instance. +pub fn create_runtime( + code: &[u8], + heap_pages: u64, + host_functions: Vec<&'static dyn Function>, + allow_missing_func_imports: bool, +) -> Result { + let module = Module::from_buffer(&code).map_err(|_| WasmError::InvalidModule)?; + + // Extract the data segments from the wasm code. + // + // A return of this error actually indicates that there is a problem in logic, since + // we just loaded and validated the `module` above. + let data_segments = extract_data_segments(&code)?; + Ok(WasmiRuntime { + module, + data_segments, + host_functions: Arc::new(host_functions), + allow_missing_func_imports, + heap_pages, + }) +} + +/// Wasmi instance wrapper along with the state snapshot. +pub struct WasmiInstance { /// A wasm module instance. instance: ModuleRef, /// The memory instance of used by the wasm module. @@ -633,7 +701,7 @@ pub struct WasmiRuntime { /// The snapshot of the instance's state taken just after the instantiation. state_snapshot: StateSnapshot, /// The host functions registered for this instance. - host_functions: Vec<&'static dyn Function>, + host_functions: Arc>, /// Enable stub generation for functions that are not available in `host_functions`. /// These stubs will error when the wasm blob tries to call them. allow_missing_func_imports: bool, @@ -641,13 +709,12 @@ pub struct WasmiRuntime { missing_functions: Vec, } -impl WasmRuntime for WasmiRuntime { - fn host_functions(&self) -> &[&'static dyn Function] { - &self.host_functions - } +// This is safe because `WasmiInstance` does not leak any references to `self.memory` and `self.instance` +unsafe impl Send for WasmiInstance {} +impl WasmInstance for WasmiInstance { fn call( - &mut self, + &self, method: &str, data: &[u8], ) -> Result, Error> { @@ -664,67 +731,26 @@ impl WasmRuntime for WasmiRuntime { &self.memory, method, data, - &self.host_functions, + self.host_functions.as_ref(), self.allow_missing_func_imports, - &self.missing_functions, + self.missing_functions.as_ref(), ) } - fn get_global_val(&self, name: &str) -> Result, Error> { + fn get_global_const(&self, name: &str) -> Result, Error> { match self.instance.export_by_name(name) { Some(global) => Ok(Some( global - .as_global() - .ok_or_else(|| format!("`{}` is not a global", name))? - .get() - .into() + .as_global() + .ok_or_else(|| format!("`{}` is not a global", name))? 
+ .get() + .into() )), None => Ok(None), } } } -pub fn create_instance( - code: &[u8], - heap_pages: u64, - host_functions: Vec<&'static dyn Function>, - allow_missing_func_imports: bool, -) -> Result { - let module = Module::from_buffer(&code).map_err(|_| WasmError::InvalidModule)?; - - // Extract the data segments from the wasm code. - // - // A return of this error actually indicates that there is a problem in logic, since - // we just loaded and validated the `module` above. - let data_segments = extract_data_segments(&code)?; - - // Instantiate this module. - let (instance, missing_functions, memory) = instantiate_module( - heap_pages as usize, - &module, - &host_functions, - allow_missing_func_imports, - ).map_err(|e| WasmError::Instantiation(e.to_string()))?; - - // Take state snapshot before executing anything. - let state_snapshot = StateSnapshot::take(&instance, data_segments) - .expect( - "`take` returns `Err` if the module is not valid; - we already loaded module above, thus the `Module` is proven to be valid at this point; - qed - ", - ); - - Ok(WasmiRuntime { - instance, - memory, - state_snapshot, - host_functions, - allow_missing_func_imports, - missing_functions, - }) -} - /// Extract the data segments from the given wasm code. /// /// Returns `Err` if the given wasm code cannot be deserialized. diff --git a/client/executor/wasmtime/Cargo.toml b/client/executor/wasmtime/Cargo.toml index eb41adb2714e03b8b99291c70a6dbabb668020ee..9f8784cc981b7d1a79438846255d17afcf27d9d3 100644 --- a/client/executor/wasmtime/Cargo.toml +++ b/client/executor/wasmtime/Cargo.toml @@ -1,22 +1,24 @@ [package] name = "sc-executor-wasmtime" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Defines a `WasmRuntime` that uses the Wasmtime JIT to execute." 
[dependencies] log = "0.4.8" -wasmi = "0.6.2" +scoped-tls = "1.0" parity-wasm = "0.41.0" -codec = { package = "parity-scale-codec", version = "1.0.0" } -sc-executor-common = { version = "0.8", path = "../common" } -sp-wasm-interface = { version = "2.0.0", path = "../../../primitives/wasm-interface" } -sp-runtime-interface = { version = "2.0.0", path = "../../../primitives/runtime-interface" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-allocator = { version = "2.0.0", path = "../../../primitives/allocator" } - -wasmtime = "0.11" +codec = { package = "parity-scale-codec", version = "1.2.0" } +sc-executor-common = { version = "0.8.0-alpha.2", path = "../common" } +sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/wasm-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime-interface" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-allocator = { version = "2.0.0-alpha.2", path = "../../../primitives/allocator" } +wasmtime = { git = "https://github.com/paritytech/wasmtime", branch = "a-thread-safe-api" } [dev-dependencies] assert_matches = "1.3.0" diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index e0cc6ecc9ae7a1dd738352a0d287ff9d5ad7ff5b..29187ac66338e87e8694bb89a4dc0fa19c9e534d 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -19,7 +19,7 @@ use crate::instance_wrapper::InstanceWrapper; use crate::util; -use std::cell::RefCell; +use std::{cell::RefCell, rc::Rc}; use log::trace; use codec::{Encode, Decode}; use sp_allocator::FreeingBumpHeapAllocator; @@ -51,12 +51,12 @@ pub struct HostState { // borrow after performing necessary queries/changes. sandbox_store: RefCell>, allocator: RefCell, - instance: InstanceWrapper, + instance: Rc, } impl HostState { /// Constructs a new `HostState`. - pub fn new(allocator: FreeingBumpHeapAllocator, instance: InstanceWrapper) -> Self { + pub fn new(allocator: FreeingBumpHeapAllocator, instance: Rc) -> Self { HostState { sandbox_store: RefCell::new(sandbox::Store::new()), allocator: RefCell::new(allocator), @@ -64,11 +64,6 @@ impl HostState { } } - /// Destruct the host state and extract the `InstanceWrapper` passed at the creation. - pub fn into_instance(self) -> InstanceWrapper { - self.instance - } - /// Materialize `HostContext` that can be used to invoke a substrate host `dyn Function`. pub fn materialize<'a>(&'a self) -> HostContext<'a> { HostContext(self) diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index 349f84a0d74d27031ea3d3cba613cb3f36f02fbf..48299ffd62de1caef2e03016d9b8060188548365 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::state_holder::StateHolder; +use crate::state_holder; use sc_executor_common::error::WasmError; use sp_wasm_interface::{Function, Value, ValueType}; use std::any::Any; @@ -34,7 +34,6 @@ pub struct Imports { /// Goes over all imports of a module and prepares a vector of `Extern`s that can be used for /// instantiation of the module. Returns an error if there are imports that cannot be satisfied. 
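/// # Example
///
/// A rough sketch of the call shape, as `WasmtimeRuntime::new_instance` below drives it
/// (`module`, `host_functions`, `heap_pages` and `allow_missing_func_imports` assumed in scope):
///
/// ```nocompile
/// let imports = resolve_imports(&module, &host_functions, heap_pages, allow_missing_func_imports)?;
/// ```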
pub fn resolve_imports( - state_holder: &StateHolder, module: &Module, host_functions: &[&'static dyn Function], heap_pages: u32, @@ -58,7 +57,6 @@ pub fn resolve_imports( } _ => resolve_func_import( module, - state_holder, import_ty, host_functions, allow_missing_func_imports, @@ -112,7 +110,6 @@ fn resolve_memory_import( fn resolve_func_import( module: &Module, - state_holder: &StateHolder, import_ty: &ImportType, host_functions: &[&'static dyn Function], allow_missing_func_imports: bool, @@ -152,7 +149,7 @@ fn resolve_func_import( ))); } - Ok(HostFuncHandler::new(&state_holder, *host_func).into_extern(module)) + Ok(HostFuncHandler::new(*host_func).into_extern(module)) } /// Returns `true` if `lhs` and `rhs` represent the same signature. @@ -163,14 +160,12 @@ fn signature_matches(lhs: &wasmtime::FuncType, rhs: &wasmtime::FuncType) -> bool /// This structure implements `Callable` and acts as a bridge between wasmtime and /// substrate host functions. struct HostFuncHandler { - state_holder: StateHolder, host_func: &'static dyn Function, } impl HostFuncHandler { - fn new(state_holder: &StateHolder, host_func: &'static dyn Function) -> Self { + fn new(host_func: &'static dyn Function) -> Self { Self { - state_holder: state_holder.clone(), host_func, } } @@ -188,7 +183,7 @@ impl Callable for HostFuncHandler { wasmtime_params: &[Val], wasmtime_results: &mut [Val], ) -> Result<(), wasmtime::Trap> { - let unwind_result = self.state_holder.with_context(|host_ctx| { + let unwind_result = state_holder::with_context(|host_ctx| { let mut host_ctx = host_ctx.expect( "host functions can be called only from wasm instance; wasm instance is always called initializing context; diff --git a/client/executor/wasmtime/src/lib.rs b/client/executor/wasmtime/src/lib.rs index 8f4801e6da1d0de171ff50d2e8b04c0a08d1f00d..66e4e085235ac3adc2e4026679ec849d002ed94a 100644 --- a/client/executor/wasmtime/src/lib.rs +++ b/client/executor/wasmtime/src/lib.rs @@ -23,4 +23,4 @@ mod imports; mod instance_wrapper; mod util; -pub use runtime::create_instance; +pub use runtime::create_runtime; diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index b99d3347872059d778f50c3c00581252e224a167..02acd33e69a628705675105e8644bad177bfc15a 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -15,57 +15,85 @@ // along with Substrate. If not, see . //! Defines the compiled Wasm runtime that uses Wasmtime internally. +use std::rc::Rc; +use std::sync::Arc; use crate::host::HostState; -use crate::imports::{resolve_imports, Imports}; +use crate::imports::{Imports, resolve_imports}; use crate::instance_wrapper::InstanceWrapper; -use crate::state_holder::StateHolder; +use crate::state_holder; use sc_executor_common::{ error::{Error, Result, WasmError}, - wasm_runtime::WasmRuntime, + wasm_runtime::{WasmModule, WasmInstance}, }; use sp_allocator::FreeingBumpHeapAllocator; use sp_runtime_interface::unpack_ptr_and_len; use sp_wasm_interface::{Function, Pointer, WordSize, Value}; use wasmtime::{Config, Engine, Module, Store}; -/// A `WasmRuntime` implementation using wasmtime to compile the runtime module to machine code +/// A `WasmModule` implementation using wasmtime to compile the runtime module to machine code /// and execute the compiled code. 
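/// # Example
///
/// A rough sketch of the intended flow, with error handling and host-state setup
/// omitted (real callers go through `sc-executor`'s runtime cache):
///
/// ```nocompile
/// let runtime = create_runtime(&code, 1024, host_functions, false)?;
/// let instance = runtime.new_instance()?;
/// let encoded_version = instance.call("Core_version", &[])?;
/// ```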
pub struct WasmtimeRuntime { - module: Module, - imports: Imports, - state_holder: StateHolder, + module: Arc, heap_pages: u32, + allow_missing_func_imports: bool, host_functions: Vec<&'static dyn Function>, } -impl WasmRuntime for WasmtimeRuntime { - fn host_functions(&self) -> &[&'static dyn Function] { - &self.host_functions +impl WasmModule for WasmtimeRuntime { + fn new_instance(&self) -> Result> { + // Scan all imports, find the matching host functions, and create stubs that adapt arguments + // and results. + let imports = resolve_imports( + &self.module, + &self.host_functions, + self.heap_pages, + self.allow_missing_func_imports, + )?; + + Ok(Box::new(WasmtimeInstance { + module: self.module.clone(), + imports, + heap_pages: self.heap_pages, + })) } +} + +/// A `WasmInstance` implementation that reuses compiled module and spawns instances +/// to execute the compiled code. +pub struct WasmtimeInstance { + module: Arc, + imports: Imports, + heap_pages: u32, +} + +// This is safe because `WasmtimeInstance` does not leak reference to `self.imports` +// and all imports don't reference any anything, other than host functions and memory +unsafe impl Send for WasmtimeInstance {} - fn call(&mut self, method: &str, data: &[u8]) -> Result> { +impl WasmInstance for WasmtimeInstance { + fn call(&self, method: &str, data: &[u8]) -> Result> { + // TODO: reuse the instance and reset globals after call + // https://github.com/paritytech/substrate/issues/5141 + let instance = Rc::new(InstanceWrapper::new(&self.module, &self.imports, self.heap_pages)?); call_method( - &self.module, - &mut self.imports, - &self.state_holder, + instance, method, data, - self.heap_pages, ) } - fn get_global_val(&self, name: &str) -> Result> { - // Yeah, there is no better way currently :( - InstanceWrapper::new(&self.module, &self.imports, self.heap_pages)? - .get_global_val(name) + fn get_global_const(&self, name: &str) -> Result> { + let instance = InstanceWrapper::new(&self.module, &self.imports, self.heap_pages)?; + instance.get_global_val(name) } } + /// Create a new `WasmtimeRuntime` given the code. This function performs translation from Wasm to /// machine code, which can be computationally heavy. -pub fn create_instance( +pub fn create_runtime( code: &[u8], heap_pages: u64, host_functions: Vec<&'static dyn Function>, @@ -80,55 +108,37 @@ pub fn create_instance( let module = Module::new(&store, code) .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; - let state_holder = StateHolder::empty(); - - // Scan all imports, find the matching host functions, and create stubs that adapt arguments - // and results. - let imports = resolve_imports( - &state_holder, - &module, - &host_functions, - heap_pages as u32, - allow_missing_func_imports, - )?; - Ok(WasmtimeRuntime { - module, - imports, - state_holder, + module: Arc::new(module), heap_pages: heap_pages as u32, + allow_missing_func_imports, host_functions, }) } /// Call a function inside a precompiled Wasm module. 
fn call_method( - module: &Module, - imports: &mut Imports, - state_holder: &StateHolder, + instance_wrapper: Rc, method: &str, data: &[u8], - heap_pages: u32, ) -> Result> { - let instance_wrapper = InstanceWrapper::new(module, imports, heap_pages)?; let entrypoint = instance_wrapper.resolve_entrypoint(method)?; let heap_base = instance_wrapper.extract_heap_base()?; let allocator = FreeingBumpHeapAllocator::new(heap_base); - perform_call(data, state_holder, instance_wrapper, entrypoint, allocator) + perform_call(data, instance_wrapper, entrypoint, allocator) } fn perform_call( data: &[u8], - state_holder: &StateHolder, - instance_wrapper: InstanceWrapper, + instance_wrapper: Rc, entrypoint: wasmtime::Func, mut allocator: FreeingBumpHeapAllocator, ) -> Result> { let (data_ptr, data_len) = inject_input_data(&instance_wrapper, &mut allocator, data)?; - let host_state = HostState::new(allocator, instance_wrapper); - let (ret, host_state) = state_holder.with_initialized_state(host_state, || { + let host_state = HostState::new(allocator, instance_wrapper.clone()); + let ret = state_holder::with_initialized_state(&host_state, || { match entrypoint.call(&[ wasmtime::Val::I32(u32::from(data_ptr) as i32), wasmtime::Val::I32(u32::from(data_len) as i32), @@ -146,9 +156,7 @@ fn perform_call( } }); let (output_ptr, output_len) = ret?; - - let instance = host_state.into_instance(); - let output = extract_output_data(&instance, output_ptr, output_len)?; + let output = extract_output_data(&instance_wrapper, output_ptr, output_len)?; Ok(output) } diff --git a/client/executor/wasmtime/src/state_holder.rs b/client/executor/wasmtime/src/state_holder.rs index 57564ed3ec414506e1960c41654e42be96e44576..42cb79e7a356807176033266dd0a4f22eacb5103 100644 --- a/client/executor/wasmtime/src/state_holder.rs +++ b/client/executor/wasmtime/src/state_holder.rs @@ -15,63 +15,29 @@ // along with Substrate. If not, see . use crate::host::{HostContext, HostState}; -use std::cell::RefCell; -use std::rc::Rc; -/// A common place to store a reference to the `HostState`. -/// -/// This structure is passed into each host function handler and retained in the implementation of -/// `WasmRuntime`. Whenever a call into a runtime method is initiated, the host state is populated -/// with the state for that runtime method call. +scoped_tls::scoped_thread_local!(static HOST_STATE: HostState); + +/// Provide `HostState` for the runtime method call and execute the given function `f`. /// -/// During the execution of the runtime method call, wasm can call imported host functions. When -/// that happens the host function handler gets a `HostContext` (obtainable through having a -/// `HostState` reference). -#[derive(Clone)] -pub struct StateHolder { - // This is `Some` only during a call. - state: Rc>>, +/// During the execution of the provided function `with_context` will be callable. +pub fn with_initialized_state(s: &HostState, f: F) -> R +where + F: FnOnce() -> R, +{ + HOST_STATE.set(s, f) } -impl StateHolder { - /// Create a placeholder `StateHolder`. - pub fn empty() -> StateHolder { - StateHolder { - state: Rc::new(RefCell::new(None)), - } - } - - /// Provide `HostState` for the runtime method call and execute the given function `f`. - /// - /// During the execution of the provided function `with_context` will be callable. 
- pub fn with_initialized_state(&self, state: HostState, f: F) -> (R, HostState) - where - F: FnOnce() -> R, - { - *self.state.borrow_mut() = Some(state); - - let ret = f(); - let state = self - .state - .borrow_mut() - .take() - .expect("cannot be None since was just assigned; qed"); - - (ret, state) - } - - /// Create a `HostContext` from the contained `HostState` and execute the given function `f`. - /// - /// This function is only callable within closure passed to `init_state`. Otherwise, the passed - /// context will be `None`. - pub fn with_context(&self, f: F) -> R - where - F: FnOnce(Option) -> R, - { - let state = self.state.borrow(); - match *state { - Some(ref state) => f(Some(state.materialize())), - None => f(None), - } +/// Create a `HostContext` from the contained `HostState` and execute the given function `f`. +/// +/// This function is only callable within closure passed to `init_state`. Otherwise, the passed +/// context will be `None`. +pub fn with_context(f: F) -> R +where + F: FnOnce(Option) -> R, +{ + if !HOST_STATE.is_set() { + return f(None) } + HOST_STATE.with(|state| f(Some(state.materialize()))) } diff --git a/client/finality-grandpa/Cargo.toml b/client/finality-grandpa/Cargo.toml index e96792258a279cd507f2ecba583925e5742af9b3..6cee0cd8526f6dcbd2ed9019b172dc30b009ca05 100644 --- a/client/finality-grandpa/Cargo.toml +++ b/client/finality-grandpa/Cargo.toml @@ -1,47 +1,54 @@ [package] name = "sc-finality-grandpa" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Integration of the GRANDPA finality gadget into substrate." +documentation = "https://docs.rs/sc-finality-grandpa" + [dependencies] -fork-tree = { version = "2.0.0", path = "../../utils/fork-tree" } +fork-tree = { version = "2.0.0-alpha.2", path = "../../utils/fork-tree" } futures = "0.3.1" futures-timer = "3.0.1" log = "0.4.8" parking_lot = "0.10.0" rand = "0.7.2" assert_matches = "1.3.0" -parity-scale-codec = { version = "1.0.0", features = ["derive"] } -sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sc-keystore = { version = "2.0.0", path = "../keystore" } +parity-scale-codec = { version = "1.2.0", features = ["derive"] } +sp-arithmetic = { version = "2.0.0-alpha.2", path = "../../primitives/arithmetic" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sp-consensus = { version = "0.8.0-alpha.1", path = "../../primitives/consensus/common" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } serde_json = "1.0.41" -sc-client-api = { version = "2.0.0", path = "../api" } -sc-client = { version = "0.8", path = "../" } -sp-inherents = { version = "2.0.0", path = "../../primitives/inherents" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sc-network = { version = "0.8", path = "../network" } -sc-network-gossip = { version = "0.8", path = 
"../network-gossip" } -sp-finality-tracker = { version = "2.0.0", path = "../../primitives/finality-tracker" } -sp-finality-grandpa = { version = "2.0.0", path = "../../primitives/finality-grandpa" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sc-client = { version = "0.8.0-alpha.2", path = "../" } +sp-inherents = { version = "2.0.0-alpha.2", path = "../../primitives/inherents" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } +sc-network-gossip = { version = "0.8.0-alpha.2", path = "../network-gossip" } +sp-finality-tracker = { version = "2.0.0-alpha.2", path = "../../primitives/finality-tracker" } +sp-finality-grandpa = { version = "2.0.0-alpha.2", path = "../../primitives/finality-grandpa" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus", version = "0.8.0-alpha.2" } +sc-block-builder = { version = "0.8.0-alpha.2", path = "../block-builder" } finality-grandpa = { version = "0.11.1", features = ["derive-codec"] } pin-project = "0.4.6" [dev-dependencies] finality-grandpa = { version = "0.11.1", features = ["derive-codec", "test-helpers"] } -sc-network = { version = "0.8", path = "../network" } -sc-network-test = { version = "0.8.0", path = "../network/test" } -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8", path = "../../primitives/consensus/babe" } -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } +sc-network-test = { version = "0.8.0-dev", path = "../network/test" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } +sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/babe" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } env_logger = "0.7.0" -tokio = "0.1.22" +tokio = { version = "0.2", features = ["rt-core"] } tempfile = "3.1.0" -sp-api = { version = "2.0.0", path = "../../primitives/api" } -futures01 = { package = "futures", version = "0.1.29" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 050a3c8642fde81056af71f6076fd5534f9c8a6d..1f4e22359712fa2f398bd846e1fbe1620a47efa4 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -65,6 +65,7 @@ mod periodic; pub(crate) mod tests; pub use sp_finality_grandpa::GRANDPA_ENGINE_ID; +pub const GRANDPA_PROTOCOL_NAME: &[u8] = b"/paritytech/grandpa/1"; // cost scalars for reporting peers. 
mod cost { @@ -119,9 +120,8 @@ pub trait Network: GossipNetwork + Clone + Send + 'static fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor); } -impl Network for Arc> where +impl Network for Arc> where B: BlockT, - S: sc_network::specialization::NetworkSpecialization, H: sc_network::ExHashT, { fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { @@ -185,7 +185,12 @@ impl> NetworkBridge { ); let validator = Arc::new(validator); - let gossip_engine = GossipEngine::new(service.clone(), GRANDPA_ENGINE_ID, validator.clone()); + let gossip_engine = GossipEngine::new( + service.clone(), + GRANDPA_ENGINE_ID, + GRANDPA_PROTOCOL_NAME, + validator.clone() + ); { // register all previous votes with the gossip service so that they're diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index 040ee4c7bbd2257e6b188a07ee32b557ddd821e4..96761a2f3c07c567aaefdc3f0540dee3c0cabb1c 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -25,7 +25,7 @@ use std::sync::Arc; use sp_keyring::Ed25519Keyring; use parity_scale_codec::Encode; use sp_runtime::{ConsensusEngineId, traits::NumberFor}; -use std::{pin::Pin, task::{Context, Poll}}; +use std::{borrow::Cow, pin::Pin, task::{Context, Poll}}; use crate::environment::SharedVoterSetState; use sp_finality_grandpa::{AuthorityList, GRANDPA_ENGINE_ID}; use super::gossip::{self, GossipValidator}; @@ -44,20 +44,12 @@ pub(crate) struct TestNetwork { sender: mpsc::UnboundedSender, } -impl TestNetwork { - fn event_stream_03(&self) -> Pin + Send>> { +impl sc_network_gossip::Network for TestNetwork { + fn event_stream(&self) -> Pin + Send>> { let (tx, rx) = mpsc::unbounded(); let _ = self.sender.unbounded_send(Event::EventStream(tx)); Box::pin(rx) } -} - -impl sc_network_gossip::Network for TestNetwork { - fn event_stream(&self) -> Box + Send> { - Box::new( - self.event_stream_03().map(Ok::<_, ()>).compat() - ) - } fn report_peer(&self, who: sc_network::PeerId, cost_benefit: sc_network::ReputationChange) { let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); @@ -69,7 +61,7 @@ impl sc_network_gossip::Network for TestNetwork { let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); } - fn register_notifications_protocol(&self, _: ConsensusEngineId) {} + fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, [u8]>) {} fn announce(&self, block: Hash, _associated_data: Vec) { let _ = self.sender.unbounded_send(Event::Announce(block)); diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index fd88113776c9c02c9feca3292e096e3958d50693..a0f37f20cb390926cb6c43662e1370772863313b 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -25,18 +25,11 @@ use parity_scale_codec::{Decode, Encode}; use futures::prelude::*; use futures_timer::Delay; use parking_lot::RwLock; -use sp_blockchain::{HeaderBackend, Error as ClientError}; - -use sc_client_api::{ - BlockchainEvents, - backend::{AuxStore, Backend}, - Finalizer, - call_executor::CallExecutor, - utils::is_descendent_of, -}; -use sc_client::{ - apply_aux, Client, -}; +use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata}; +use std::marker::PhantomData; + +use sc_client_api::{backend::Backend, utils::is_descendent_of}; +use sc_client::apply_aux; use finality_grandpa::{ 
BlockNumberOps, Equivocation, Error as GrandpaError, round::State as RoundState, voter, voter_set::VoterSet, @@ -62,6 +55,7 @@ use crate::justification::GrandpaJustification; use crate::until_imported::UntilVoteTargetImported; use crate::voting_rule::VotingRule; use sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId, RoundNumber}; +use prometheus_endpoint::{Gauge, U64, register, PrometheusError}; type HistoricalVotes = finality_grandpa::HistoricalVotes< ::Hash, @@ -376,9 +370,27 @@ impl SharedVoterSetState { } } +/// Prometheus metrics for GRANDPA. +#[derive(Clone)] +pub(crate) struct Metrics { + finality_grandpa_round: Gauge, +} + +impl Metrics { + pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { + Ok(Self { + finality_grandpa_round: register( + Gauge::new("finality_grandpa_round", "Highest completed GRANDPA round.")?, + registry + )?, + }) + } +} + + /// The environment we run GRANDPA in. -pub(crate) struct Environment, RA, SC, VR> { - pub(crate) client: Arc>, +pub(crate) struct Environment, SC, VR> { + pub(crate) client: Arc, pub(crate) select_chain: SC, pub(crate) voters: Arc>, pub(crate) config: Config, @@ -388,9 +400,11 @@ pub(crate) struct Environment, RA, SC, V pub(crate) set_id: SetId, pub(crate) voter_set_state: SharedVoterSetState, pub(crate) voting_rule: VR, + pub(crate) metrics: Option, + pub(crate) _phantom: PhantomData, } -impl, RA, SC, VR> Environment { +impl, SC, VR> Environment { /// Updates the voter set state using the given closure. The write lock is /// held during evaluation of the closure and the environment's voter set /// state is set to its result if successful. @@ -400,23 +414,33 @@ impl, RA, SC, VR> Environment +impl finality_grandpa::Chain> -for Environment +for Environment where Block: 'static, - B: Backend + 'static, - E: CallExecutor + Send + Sync, + BE: Backend, + C: crate::ClientForGrandpa, N: NetworkT + 'static + Send, SC: SelectChain + 'static, - VR: VotingRule>, - RA: Send + Sync, + VR: VotingRule, NumberFor: BlockNumberOps, { fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { @@ -432,7 +456,7 @@ where return None; } - let base_header = match self.client.header(&BlockId::Hash(block)).ok()? { + let base_header = match self.client.header(BlockId::Hash(block)).ok()? { Some(h) => h, None => { debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find base block", block); @@ -450,7 +474,7 @@ where match self.select_chain.finality_target(block, None) { Ok(Some(best_hash)) => { - let best_header = self.client.header(&BlockId::Hash(best_hash)).ok()? + let best_header = self.client.header(BlockId::Hash(best_hash)).ok()? .expect("Header known to exist after `finality_target` call; qed"); // check if our vote is currently being limited due to a pending change @@ -474,7 +498,7 @@ where break; } - target_header = self.client.header(&BlockId::Hash(*target_header.parent_hash())).ok()? + target_header = self.client.header(BlockId::Hash(*target_header.parent_hash())).ok()? 
.expect("Header known to exist after `finality_target` call; qed"); } @@ -519,17 +543,16 @@ where } -pub(crate) fn ancestry( - client: &Client, +pub(crate) fn ancestry( + client: &Arc, base: Block::Hash, block: Block::Hash, ) -> Result, GrandpaError> where - B: Backend, - E: CallExecutor, + Client: HeaderMetadata, { if base == block { return Err(GrandpaError::NotDescendent) } - let tree_route_res = sp_blockchain::tree_route(client, block, base); + let tree_route_res = sp_blockchain::tree_route(&**client, block, base); let tree_route = match tree_route_res { Ok(tree_route) => tree_route, @@ -550,19 +573,17 @@ pub(crate) fn ancestry( Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) } -impl +impl voter::Environment> -for Environment +for Environment where Block: 'static, - B: Backend + 'static, - E: CallExecutor + 'static + Send + Sync, + B: Backend, + C: crate::ClientForGrandpa + 'static, N: NetworkT + 'static + Send, - RA: 'static + Send + Sync, SC: SelectChain + 'static, - VR: VotingRule>, + VR: VotingRule, NumberFor: BlockNumberOps, - Client: AuxStore, { type Timer = Pin> + Send>>; type Id = AuthorityId; @@ -882,7 +903,7 @@ where commit: Commit, ) -> Result<(), Self::Error> { finalize_block( - &*self.client, + self.client.clone(), &self.authority_set, &self.consensus_changes, Some(self.config.justification_period.into()), @@ -940,8 +961,8 @@ impl From> for JustificationOrCommit< /// authority set change is enacted then a justification is created (if not /// given) and stored with the block when finalizing it. /// This method assumes that the block being finalized has already been imported. -pub(crate) fn finalize_block( - client: &Client, +pub(crate) fn finalize_block( + client: Arc, authority_set: &SharedAuthoritySet>, consensus_changes: &SharedConsensusChanges>, justification_period: Option>, @@ -949,16 +970,16 @@ pub(crate) fn finalize_block( number: NumberFor, justification_or_commit: JustificationOrCommit, ) -> Result<(), CommandOrError>> where - B: Backend, - E: CallExecutor + Send + Sync, - RA: Send + Sync, + Block: BlockT, + BE: Backend, + Client: crate::ClientForGrandpa, { // NOTE: lock must be held through writing to DB to avoid race. this lock // also implicitly synchronizes the check for last finalized number // below. let mut authority_set = authority_set.inner().write(); - let status = client.chain_info(); + let status = client.info(); if number <= status.finalized_number && client.hash(number)? 
== Some(hash) { // This can happen after a forced change (triggered by the finality tracker when finality is stalled), since // the voter will be restarted at the median last finalized block, which can be lower than the local best @@ -981,14 +1002,14 @@ pub(crate) fn finalize_block( let mut consensus_changes = consensus_changes.lock(); let canon_at_height = |canon_number| { // "true" because the block is finalized - canonical_at_height(client, (hash, number), true, canon_number) + canonical_at_height(&*client, (hash, number), true, canon_number) }; let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { let status = authority_set.apply_standard_changes( hash, number, - &is_descendent_of::(client, None), + &is_descendent_of::(&*client, None), ).map_err(|e| Error::Safety(e.to_string()))?; // check if this is this is the first finalization of some consensus changes @@ -1031,7 +1052,7 @@ pub(crate) fn finalize_block( // finalization to remote nodes if !justification_required { if let Some(justification_period) = justification_period { - let last_finalized_number = client.chain_info().finalized_number; + let last_finalized_number = client.info().finalized_number; justification_required = (!last_finalized_number.is_zero() || number - last_finalized_number == justification_period) && (last_finalized_number / justification_period != number / justification_period); @@ -1040,7 +1061,7 @@ pub(crate) fn finalize_block( if justification_required { let justification = GrandpaJustification::from_commit( - client, + &client, round_number, commit, )?; diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 9da99ab531ae59044c71b10794e0ced3b1546632..2c85839b5e3640ca19398767dffb42ea4a3afa05 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -34,16 +34,15 @@ //! finality proof (that finalizes some block C that is ancestor of the B and descendant //! of the U) could be returned. -use std::iter; use std::sync::Arc; use log::{trace, warn}; use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; use sc_client_api::{ - backend::Backend, CallExecutor, StorageProof, + backend::Backend, StorageProof, light::{FetchChecker, RemoteReadRequest}, + StorageProvider, ProofProvider, }; -use sc_client::Client; use parity_scale_codec::{Encode, Decode}; use finality_grandpa::BlockNumberOps; use sp_runtime::{ @@ -67,12 +66,25 @@ pub trait AuthoritySetForFinalityProver: Send + Sync { fn prove_authorities(&self, block: &BlockId) -> ClientResult; } -/// Client-based implementation of AuthoritySetForFinalityProver. -impl AuthoritySetForFinalityProver for Client +/// Trait that combines `StorageProvider` and `ProofProvider` +pub trait StorageAndProofProvider: StorageProvider + ProofProvider + Send + Sync where - B: Backend + Send + Sync + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - RA: Send + Sync, + Block: BlockT, + BE: Backend + Send + Sync, +{} + +/// Blanket implementation. +impl StorageAndProofProvider for P + where + Block: BlockT, + BE: Backend + Send + Sync, + P: StorageProvider + ProofProvider + Send + Sync, +{} + +/// Implementation of AuthoritySetForFinalityProver. 
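The `StorageAndProofProvider` trait added above exists because Rust allows only one non-auto trait inside a trait object, so `dyn StorageProvider + ProofProvider` cannot be written directly; combining the two behind a new trait with a blanket impl yields a single erasable bound such as `Arc<dyn StorageAndProofProvider<_, _>>`. A reduced, std-only sketch of the idea, using toy trait and type names rather than the real Substrate ones:

use std::sync::Arc;

// Two independent capabilities, standing in for `StorageProvider` / `ProofProvider`.
trait Storage { fn get(&self, key: &str) -> Option<String>; }
trait Proof { fn prove(&self, key: &str) -> Vec<u8>; }

// A combining trait plus a blanket impl gives a single trait-object type.
trait StorageAndProof: Storage + Proof {}
impl<T: Storage + Proof> StorageAndProof for T {}

struct InMem;
impl Storage for InMem { fn get(&self, _key: &str) -> Option<String> { None } }
impl Proof for InMem { fn prove(&self, _key: &str) -> Vec<u8> { Vec::new() } }

fn main() {
    // One erased handle exposing both capabilities, analogous to the
    // `Arc<dyn StorageAndProofProvider<_, _>>` handles used in the diff.
    let provider: Arc<dyn StorageAndProof> = Arc::new(InMem);
    assert!(provider.get("authorities").is_none());
    assert!(provider.prove("authorities").is_empty());
}

Any type that already implements both capabilities picks up the combined trait for free, which is exactly what the blanket impl above provides.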
+impl AuthoritySetForFinalityProver for Arc> + where + BE: Backend + Send + Sync + 'static, { fn authorities(&self, block: &BlockId) -> ClientResult { let storage_key = StorageKey(GRANDPA_AUTHORITIES_KEY.to_vec()); @@ -83,7 +95,7 @@ impl AuthoritySetForFinalityProver for Client) -> ClientResult { - self.read_proof(block, iter::once(GRANDPA_AUTHORITIES_KEY)) + self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY)) } } @@ -146,15 +158,17 @@ impl FinalityProofProvider /// /// - backend for accessing blockchain data; /// - authority_provider for calling and proving runtime methods. - pub fn new( + pub fn new
( backend: Arc, - authority_provider: Arc>, - ) -> Self { - FinalityProofProvider { backend, authority_provider } + authority_provider: P, + ) -> Self + where P: AuthoritySetForFinalityProver + 'static, + { + FinalityProofProvider { backend, authority_provider: Arc::new(authority_provider) } } } -impl sc_network::FinalityProofProvider for FinalityProofProvider +impl sc_network::config::FinalityProofProvider for FinalityProofProvider where Block: BlockT, NumberFor: BlockNumberOps, diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index 2eb1b4a7c889c3929c57ac3149e373a6c86c736f..ea1deccdafbf5d4a428947917b797779ff0957fe 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -21,9 +21,10 @@ use parity_scale_codec::Encode; use futures::channel::mpsc; use parking_lot::RwLockWriteGuard; -use sp_blockchain::{HeaderBackend, BlockStatus, well_known_cache_keys}; -use sc_client_api::{backend::{TransactionFor, Backend}, CallExecutor, utils::is_descendent_of}; -use sc_client::Client; +use sp_blockchain::{BlockStatus, well_known_cache_keys}; +use sc_client_api::{backend::Backend, utils::is_descendent_of}; +use sp_api::{TransactionFor}; + use sp_consensus::{ BlockImport, Error as ConsensusError, BlockCheckParams, BlockImportParams, ImportResult, JustificationImport, @@ -41,6 +42,7 @@ use crate::authorities::{AuthoritySet, SharedAuthoritySet, DelayKind, PendingCha use crate::consensus_changes::SharedConsensusChanges; use crate::environment::finalize_block; use crate::justification::GrandpaJustification; +use std::marker::PhantomData; /// A block-import handler for GRANDPA. /// @@ -51,16 +53,17 @@ use crate::justification::GrandpaJustification; /// /// When using GRANDPA, the block import worker should be using this block import /// object. 
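With the concrete `Client<B, E, Block, RA>` type gone, the import structs below are generic over a backend parameter that no longer appears in any field, so they keep it alive with a `_phantom: PhantomData<..>` member (the same trick used for `Environment` earlier in this diff). A minimal sketch of the pattern, with invented names standing in for the real types:

use std::marker::PhantomData;
use std::sync::Arc;

// Toy stand-ins for the backend and client abstractions.
trait Backend {}
trait Client {}

// `BE` is needed only for trait bounds elsewhere, so the struct keeps the
// type parameter alive with a zero-sized `PhantomData` field.
struct ImportHandle<BE, C> {
    inner: Arc<C>,
    _phantom: PhantomData<BE>,
}

impl<BE: Backend, C: Client> ImportHandle<BE, C> {
    fn new(inner: Arc<C>) -> Self {
        ImportHandle { inner, _phantom: PhantomData }
    }

    fn client(&self) -> &C {
        &self.inner
    }
}

struct MemBackend;
impl Backend for MemBackend {}
struct MemClient;
impl Client for MemClient {}

fn main() {
    let handle: ImportHandle<MemBackend, MemClient> = ImportHandle::new(Arc::new(MemClient));
    let _client = handle.client();
}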
-pub struct GrandpaBlockImport { - inner: Arc>, +pub struct GrandpaBlockImport { + inner: Arc, select_chain: SC, authority_set: SharedAuthoritySet>, send_voter_commands: mpsc::UnboundedSender>>, consensus_changes: SharedConsensusChanges>, + _phantom: PhantomData, } -impl Clone for - GrandpaBlockImport +impl Clone for + GrandpaBlockImport { fn clone(&self) -> Self { GrandpaBlockImport { @@ -69,24 +72,24 @@ impl Clone for authority_set: self.authority_set.clone(), send_voter_commands: self.send_voter_commands.clone(), consensus_changes: self.consensus_changes.clone(), + _phantom: PhantomData, } } } -impl JustificationImport - for GrandpaBlockImport where +impl JustificationImport + for GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, DigestFor: Encode, - RA: Send + Sync, + BE: Backend, + Client: crate::ClientForGrandpa, SC: SelectChain, { type Error = ConsensusError; fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { let mut out = Vec::new(); - let chain_info = self.inner.chain_info(); + let chain_info = self.inner.info(); // request justifications for all pending changes for which change blocks have already been imported let authorities = self.authority_set.inner().read(); @@ -105,7 +108,7 @@ impl JustificationImport }; if let Ok(Some(hash)) = effective_block_hash { - if let Ok(Some(header)) = self.inner.header(&BlockId::Hash(hash)) { + if let Ok(Some(header)) = self.inner.header(BlockId::Hash(hash)) { if *header.number() == pending_change.effective_number() { out.push((header.hash(), *header.number())); } @@ -123,7 +126,7 @@ impl JustificationImport number: NumberFor, justification: Justification, ) -> Result<(), Self::Error> { - self.import_justification(hash, number, justification, false) + GrandpaBlockImport::import_justification(self, hash, number, justification, false) } } @@ -200,14 +203,13 @@ fn find_forced_change(header: &B::Header) header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) } -impl - GrandpaBlockImport +impl + GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, DigestFor: Encode, - RA: Send + Sync, + BE: Backend, + Client: crate::ClientForGrandpa, { // check for a new authority set change. fn check_new_change(&self, header: &Block::Header, hash: Block::Hash) @@ -235,11 +237,11 @@ where }) } - fn make_authorities_changes<'a>( - &'a self, - block: &mut BlockImportParams>, + fn make_authorities_changes( + &self, + block: &mut BlockImportParams>, hash: Block::Hash, - ) -> Result, ConsensusError> { + ) -> Result, ConsensusError> { // when we update the authorities, we need to hold the lock // until the block is written to prevent a race if we need to restore // the old authority set on error or panic. @@ -325,10 +327,10 @@ where // for the canon block the new authority set should start // with. we use the minimum between the median and the local // best finalized block. - let best_finalized_number = self.inner.chain_info().finalized_number; + let best_finalized_number = self.inner.info().finalized_number; let canon_number = best_finalized_number.min(median_last_finalized_number); let canon_hash = - self.inner.header(&BlockId::Number(canon_number)) + self.inner.header(BlockId::Number(canon_number)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
.expect("the given block number is less or equal than the current best finalized number; \ current best finalized number must exist in chain; qed.") @@ -380,18 +382,17 @@ where } } -impl BlockImport - for GrandpaBlockImport where +impl BlockImport + for GrandpaBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, DigestFor: Encode, - RA: Send + Sync, - for<'a> &'a Client: - BlockImport>, + BE: Backend, + Client: crate::ClientForGrandpa, + for<'a> &'a Client: + BlockImport>, { type Error = ConsensusError; - type Transaction = TransactionFor; + type Transaction = TransactionFor; fn import_block( &mut self, @@ -521,31 +522,30 @@ impl BlockImport } } -impl GrandpaBlockImport { +impl GrandpaBlockImport { pub(crate) fn new( - inner: Arc>, + inner: Arc, select_chain: SC, authority_set: SharedAuthoritySet>, send_voter_commands: mpsc::UnboundedSender>>, consensus_changes: SharedConsensusChanges>, - ) -> GrandpaBlockImport { + ) -> GrandpaBlockImport { GrandpaBlockImport { inner, select_chain, authority_set, send_voter_commands, consensus_changes, + _phantom: PhantomData, } } } -impl - GrandpaBlockImport +impl GrandpaBlockImport where + BE: Backend, + Client: crate::ClientForGrandpa, NumberFor: finality_grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - RA: Send + Sync, { /// Import a block justification and finalize the block. @@ -572,7 +572,7 @@ where }; let result = finalize_block( - &*self.inner, + self.inner.clone(), &self.authority_set, &self.consensus_changes, None, diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index ad96956454fd727a57b67969756e9d7d012d94e5..084c0042ab19a08fe5f17e31aeffd1820958fcb0 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -15,10 +15,9 @@ // along with Substrate. If not, see . use std::collections::{HashMap, HashSet}; +use std::sync::Arc; -use sc_client::Client; -use sc_client_api::{CallExecutor, backend::Backend}; -use sp_blockchain::Error as ClientError; +use sp_blockchain::{Error as ClientError, HeaderBackend}; use parity_scale_codec::{Encode, Decode}; use finality_grandpa::voter_set::VoterSet; use finality_grandpa::{Error as GrandpaError}; @@ -47,14 +46,12 @@ pub struct GrandpaJustification { impl GrandpaJustification { /// Create a GRANDPA justification from the given commit. This method /// assumes the commit is valid and well-formed. - pub(crate) fn from_commit( - client: &Client, + pub(crate) fn from_commit( + client: &Arc, round: u64, commit: Commit, ) -> Result, Error> where - B: Backend, - E: CallExecutor + Send + Sync, - RA: Send + Sync, + C: HeaderBackend, { let mut votes_ancestries_hashes = HashSet::new(); let mut votes_ancestries = Vec::new(); @@ -69,7 +66,7 @@ impl GrandpaJustification { loop { if current_hash == commit.target_hash { break; } - match client.header(&BlockId::Hash(current_hash))? { + match client.header(BlockId::Hash(current_hash))? 
{ Some(current_header) => { if *current_header.number() <= commit.target_number { return error(); diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index e931271df91001b79276f12fd5acb1d36642ff33..afee2bec53576a56c3aeba69716ad4d144bbd851 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -56,15 +56,18 @@ use futures::prelude::*; use futures::StreamExt; use log::{debug, info}; use futures::channel::mpsc; -use sc_client_api::{BlockchainEvents, CallExecutor, backend::{AuxStore, Backend}, ExecutionStrategy}; -use sp_blockchain::{HeaderBackend, Error as ClientError}; -use sc_client::Client; +use sc_client_api::{ + backend::{AuxStore, Backend}, + LockImportRun, BlockchainEvents, CallExecutor, + ExecutionStrategy, Finalizer, TransactionFor, ExecutorProvider, +}; +use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata}; use parity_scale_codec::{Decode, Encode}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{NumberFor, Block as BlockT, DigestFor, Zero}; use sc_keystore::KeyStorePtr; use sp_inherents::InherentDataProviders; -use sp_consensus::SelectChain; +use sp_consensus::{SelectChain, BlockImport}; use sp_core::Pair; use sc_telemetry::{telemetry, CONSENSUS_INFO, CONSENSUS_DEBUG}; use serde_json; @@ -93,16 +96,15 @@ mod observer; mod until_imported; mod voting_rule; -pub use finality_proof::FinalityProofProvider; +pub use finality_proof::{FinalityProofProvider, StorageAndProofProvider}; pub use justification::GrandpaJustification; pub use light_import::light_block_import; -pub use observer::run_grandpa_observer; pub use voting_rule::{ BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder }; use aux_schema::PersistentData; -use environment::{Environment, VoterSetState}; +use environment::{Environment, VoterSetState, Metrics}; use import::GrandpaBlockImport; use until_imported::UntilGlobalMessageBlocksImported; use communication::{NetworkBridge, Network as NetworkT}; @@ -110,6 +112,8 @@ use sp_finality_grandpa::{AuthorityList, AuthorityPair, AuthoritySignature, SetI // Re-export these two because it's just so damn convenient. pub use sp_finality_grandpa::{AuthorityId, ScheduledChange}; +use sp_api::ProvideRuntimeApi; +use std::marker::PhantomData; #[cfg(test)] mod tests; @@ -246,10 +250,8 @@ pub(crate) trait BlockStatus { fn block_number(&self, hash: Block::Hash) -> Result>, Error>; } -impl BlockStatus for Arc> where - B: Backend, - E: CallExecutor + Send + Sync, - RA: Send + Sync, +impl BlockStatus for Arc where + Client: HeaderBackend, NumberFor: BlockNumberOps, { fn block_number(&self, hash: Block::Hash) -> Result>, Error> { @@ -258,6 +260,29 @@ impl BlockStatus for Arc } } +/// A trait that includes all the client functionalities grandpa requires. +/// Ideally this would be a trait alias, we're not there yet. +/// tracking issue https://github.com/rust-lang/rust/issues/41517 +pub trait ClientForGrandpa: + LockImportRun + Finalizer + AuxStore + + HeaderMetadata + HeaderBackend + + BlockchainEvents + ProvideRuntimeApi + ExecutorProvider + + BlockImport, Error = sp_consensus::Error> + where + BE: Backend, + Block: BlockT, +{} + +impl ClientForGrandpa for T + where + BE: Backend, + Block: BlockT, + T: LockImportRun + Finalizer + AuxStore + + HeaderMetadata + HeaderBackend + + BlockchainEvents + ProvideRuntimeApi + ExecutorProvider + + BlockImport, Error = sp_consensus::Error>, +{} + /// Something that one can ask to do a block sync request. 
pub(crate) trait BlockSyncRequester { /// Notifies the sync service to try and sync the given block from the given @@ -349,8 +374,8 @@ impl fmt::Display for CommandOrError { } } -pub struct LinkHalf { - client: Arc>, +pub struct LinkHalf { + client: Arc, select_chain: SC, persistent_data: PersistentData, voter_commands_rx: mpsc::UnboundedReceiver>>, @@ -362,11 +387,8 @@ pub trait GenesisAuthoritySetProvider { fn get(&self) -> Result; } -impl GenesisAuthoritySetProvider for Client - where - B: Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync, - RA: Send + Sync, +impl GenesisAuthoritySetProvider for Arc> + where E: CallExecutor, { fn get(&self) -> Result { // This implementation uses the Grandpa runtime API instead of reading directly from the @@ -391,22 +413,20 @@ impl GenesisAuthoritySetProvider for Client( - client: Arc>, +pub fn block_import( + client: Arc, genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, select_chain: SC, ) -> Result<( - GrandpaBlockImport, - LinkHalf + GrandpaBlockImport, + LinkHalf, ), ClientError> where - B: Backend + 'static, - E: CallExecutor + Send + Sync, - RA: Send + Sync, SC: SelectChain, - Client: AuxStore, + BE: Backend + 'static, + Client: ClientForGrandpa + 'static, { - let chain_info = client.chain_info(); + let chain_info = client.info(); let genesis_hash = chain_info.genesis_hash; let persistent_data = aux_schema::load_persistent( @@ -441,10 +461,10 @@ where )) } -fn global_communication( +fn global_communication( set_id: SetId, voters: &Arc>, - client: &Arc>, + client: Arc, network: &NetworkBridge, keystore: &Option, ) -> ( @@ -456,10 +476,9 @@ fn global_communication( Error = CommandOrError>, > + Unpin, ) where - B: Backend, - E: CallExecutor + Send + Sync, + BE: Backend + 'static, + C: ClientForGrandpa + 'static, N: NetworkT, - RA: Send + Sync, NumberFor: BlockNumberOps, { let is_voter = is_voter(voters, keystore).is_some(); @@ -488,20 +507,18 @@ fn global_communication( /// Register the finality tracker inherent data provider (which is used by /// GRANDPA), if not registered already. -fn register_finality_tracker_inherent_data_provider( - client: Arc>, +fn register_finality_tracker_inherent_data_provider( + client: Arc, inherent_data_providers: &InherentDataProviders, ) -> Result<(), sp_consensus::Error> where - B: Backend + 'static, - E: CallExecutor + Send + Sync + 'static, - RA: Send + Sync + 'static, + Client: HeaderBackend + 'static, { if !inherent_data_providers.has_provider(&sp_finality_tracker::INHERENT_IDENTIFIER) { inherent_data_providers .register_provider(sp_finality_tracker::InherentDataProvider::new(move || { #[allow(deprecated)] { - let info = client.chain_info(); + let info = client.info(); telemetry!(CONSENSUS_INFO; "afg.finalized"; "finalized_number" => ?info.finalized_number, "finalized_hash" => ?info.finalized_hash, @@ -516,50 +533,53 @@ fn register_finality_tracker_inherent_data_provider( } /// Parameters used to run Grandpa. -pub struct GrandpaParams { +pub struct GrandpaParams { /// Configuration for the GRANDPA service. pub config: Config, /// A link to the block import worker. - pub link: LinkHalf, + pub link: LinkHalf, /// The Network instance. pub network: N, /// The inherent data providers. pub inherent_data_providers: InherentDataProviders, - /// Handle to a future that will resolve on exit. - pub on_exit: X, /// If supplied, can be used to hook on telemetry connection established events. pub telemetry_on_connect: Option>, /// A voting rule used to potentially restrict target votes. 
pub voting_rule: VR, + /// The prometheus metrics registry. + pub prometheus_registry: Option, } /// Run a GRANDPA voter as a task. Provide configuration and a link to a /// block import worker that has already been instantiated with `block_import`. -pub fn run_grandpa_voter( - grandpa_params: GrandpaParams, +pub fn run_grandpa_voter( + grandpa_params: GrandpaParams, ) -> sp_blockchain::Result + Unpin + Send + 'static> where Block::Hash: Ord, - B: Backend + 'static, - E: CallExecutor + Send + Sync + 'static, + BE: Backend + 'static, N: NetworkT + Send + Sync + Clone + 'static, SC: SelectChain + 'static, - VR: VotingRule> + Clone + 'static, + VR: VotingRule + Clone + 'static, NumberFor: BlockNumberOps, DigestFor: Encode, - RA: Send + Sync + 'static, - X: futures::Future + Clone + Send + Unpin + 'static, - Client: AuxStore, + C: ClientForGrandpa + 'static, { let GrandpaParams { - config, + mut config, link, network, inherent_data_providers, - on_exit, telemetry_on_connect, voting_rule, + prometheus_registry, } = grandpa_params; + // NOTE: we have recently removed `run_grandpa_observer` from the public + // API, I felt it is easier to just ignore this field rather than removing + // it from the config temporarily. This should be removed after #5013 is + // fixed and we re-add the observer to the public API. + config.observer_enabled = false; + let LinkHalf { client, select_chain, @@ -610,6 +630,7 @@ pub fn run_grandpa_voter( voting_rule, persistent_data, voter_commands_rx, + prometheus_registry, ); let voter_work = voter_work @@ -619,38 +640,37 @@ pub fn run_grandpa_voter( let telemetry_task = telemetry_task .then(|_| future::pending::<()>()); - Ok(future::select(future::select(voter_work, on_exit), telemetry_task).map(drop)) + Ok(future::select(voter_work, telemetry_task).map(drop)) } /// Future that powers the voter. 
#[must_use] -struct VoterWork, RA, SC, VR> { +struct VoterWork, SC, VR> { voter: Pin>>> + Send>>, - env: Arc>, + env: Arc>, voter_commands_rx: mpsc::UnboundedReceiver>>, network: NetworkBridge, } -impl VoterWork +impl VoterWork where Block: BlockT, + B: Backend + 'static, + C: ClientForGrandpa + 'static, N: NetworkT + Sync, NumberFor: BlockNumberOps, - RA: 'static + Send + Sync, - E: CallExecutor + Send + Sync + 'static, - B: Backend + 'static, SC: SelectChain + 'static, - VR: VotingRule> + Clone + 'static, - Client: AuxStore, + VR: VotingRule + Clone + 'static, { fn new( - client: Arc>, + client: Arc, config: Config, network: NetworkBridge, select_chain: SC, voting_rule: VR, persistent_data: PersistentData, voter_commands_rx: mpsc::UnboundedReceiver>>, + prometheus_registry: Option, ) -> Self { let voters = persistent_data.authority_set.current_authorities(); @@ -665,6 +685,11 @@ where authority_set: persistent_data.authority_set.clone(), consensus_changes: persistent_data.consensus_changes.clone(), voter_set_state: persistent_data.set_state.clone(), + metrics: prometheus_registry.map(|registry| { + Metrics::register(®istry) + .expect("Other metrics would have failed to register before these; qed") + }), + _phantom: PhantomData, }); let mut work = VoterWork { @@ -695,7 +720,7 @@ where "authority_id" => authority_id.to_string(), ); - let chain_info = self.env.client.chain_info(); + let chain_info = self.env.client.info(); telemetry!(CONSENSUS_INFO; "afg.authority_set"; "number" => ?chain_info.finalized_number, "hash" => ?chain_info.finalized_hash, @@ -719,7 +744,7 @@ where let global_comms = global_communication( self.env.set_id, &self.env.voters, - &self.env.client, + self.env.client.clone(), &self.env.network, &self.env.config.keystore, ); @@ -784,6 +809,8 @@ where consensus_changes: self.env.consensus_changes.clone(), network: self.env.network.clone(), voting_rule: self.env.voting_rule.clone(), + metrics: self.env.metrics.clone(), + _phantom: PhantomData, }); self.rebuild_voter(); @@ -808,17 +835,15 @@ where } } -impl Future for VoterWork +impl Future for VoterWork where Block: BlockT, + B: Backend + 'static, N: NetworkT + Sync, NumberFor: BlockNumberOps, - RA: 'static + Send + Sync, - E: CallExecutor + Send + Sync + 'static, - B: Backend + 'static, SC: SelectChain + 'static, - VR: VotingRule> + Clone + 'static, - Client: AuxStore, + C: ClientForGrandpa + 'static, + VR: VotingRule + Clone + 'static, { type Output = Result<(), Error>; @@ -863,15 +888,13 @@ where /// discards all GRANDPA messages (otherwise, we end up banning nodes that send /// us a `Neighbor` message, since there is no registered gossip validator for /// the engine id defined in the message.) -pub fn setup_disabled_grandpa( - client: Arc>, +pub fn setup_disabled_grandpa( + client: Arc, inherent_data_providers: &InherentDataProviders, network: N, ) -> Result<(), sp_consensus::Error> where - B: Backend + 'static, - E: CallExecutor + Send + Sync + 'static, - RA: Send + Sync + 'static, N: NetworkT + Send + Clone + 'static, + Client: HeaderBackend + 'static, { register_finality_tracker_inherent_data_provider( client, @@ -881,7 +904,10 @@ pub fn setup_disabled_grandpa( // We register the GRANDPA protocol so that we don't consider it an anomaly // to receive GRANDPA messages on the network. We don't process the // messages. 
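The replacement call just below now names the notification protocol explicitly, passing a `Cow<'static, [u8]>` protocol name next to the engine id, matching the two-argument `register_notifications_protocol` signature visible in the test network earlier in this diff. A toy sketch of a caller against that shape (the id and name literals here are placeholders, not the real `GRANDPA_ENGINE_ID` / `GRANDPA_PROTOCOL_NAME` values):

use std::borrow::Cow;

// Four-byte engine identifier, as in `sp_runtime::ConsensusEngineId`.
type ConsensusEngineId = [u8; 4];

trait Network {
    fn register_notifications_protocol(
        &self,
        engine_id: ConsensusEngineId,
        protocol_name: Cow<'static, [u8]>,
    );
}

struct LoggingNetwork;

impl Network for LoggingNetwork {
    fn register_notifications_protocol(
        &self,
        engine_id: ConsensusEngineId,
        protocol_name: Cow<'static, [u8]>,
    ) {
        // A real implementation would route substreams opened under
        // `protocol_name` to the consensus engine identified by `engine_id`.
        println!(
            "registered protocol {:?} for engine {:?}",
            String::from_utf8_lossy(&protocol_name),
            engine_id,
        );
    }
}

fn main() {
    let network = LoggingNetwork;
    // Placeholder values; the diff passes the GRANDPA constants here.
    network.register_notifications_protocol(*b"DEMO", From::from(&b"/demo/protocol/1"[..]));
}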
- network.register_notifications_protocol(communication::GRANDPA_ENGINE_ID); + network.register_notifications_protocol( + communication::GRANDPA_ENGINE_ID, + From::from(communication::GRANDPA_PROTOCOL_NAME), + ); Ok(()) } diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 0181a93f1088cb35c46715a9db4f32b937950f21..276f5d0f28d7ab1c9c3d376bc23ce4f55f03b1a7 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -18,8 +18,9 @@ use std::collections::HashMap; use std::sync::Arc; use log::{info, trace, warn}; use parking_lot::RwLock; -use sc_client::Client; -use sc_client_api::{CallExecutor, backend::{AuxStore, Backend, Finalizer, TransactionFor}}; +use sc_client_api::{ + backend::{AuxStore, Backend, Finalizer, TransactionFor}, +}; use sp_blockchain::{HeaderBackend, Error as ClientError, well_known_cache_keys}; use parity_scale_codec::{Encode, Decode}; use sp_consensus::{ @@ -48,17 +49,15 @@ const LIGHT_AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; const LIGHT_CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; /// Create light block importer. -pub fn light_block_import( - client: Arc>, - backend: Arc, +pub fn light_block_import( + client: Arc, + backend: Arc, genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, authority_set_provider: Arc>, -) -> Result, ClientError> +) -> Result, ClientError> where - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - RA: Send + Sync, - Client: AuxStore, + BE: Backend, + Client: crate::ClientForGrandpa, { let info = client.info(); let import_data = load_aux_import_data( @@ -79,14 +78,14 @@ pub fn light_block_import( /// It is responsible for: /// - checking GRANDPA justifications; /// - fetching finality proofs for blocks that are enacting consensus changes. -pub struct GrandpaLightBlockImport { - client: Arc>, - backend: Arc, +pub struct GrandpaLightBlockImport { + client: Arc, + backend: Arc, authority_set_provider: Arc>, data: Arc>>, } -impl Clone for GrandpaLightBlockImport { +impl Clone for GrandpaLightBlockImport { fn clone(&self) -> Self { GrandpaLightBlockImport { client: self.client.clone(), @@ -111,27 +110,26 @@ struct LightAuthoritySet { authorities: AuthorityList, } -impl GrandpaLightBlockImport { +impl GrandpaLightBlockImport { /// Create finality proof request builder. 
pub fn create_finality_proof_request_builder(&self) -> BoxFinalityProofRequestBuilder { Box::new(GrandpaFinalityProofRequestBuilder(self.data.clone())) as _ } } -impl BlockImport - for GrandpaLightBlockImport where +impl BlockImport + for GrandpaLightBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, DigestFor: Encode, - RA: Send + Sync, - for<'a> &'a Client: - BlockImport> - + Finalizer + BE: Backend + 'static, + for<'a> &'a Client: + HeaderBackend + + BlockImport> + + Finalizer + AuxStore, { type Error = ConsensusError; - type Transaction = TransactionFor; + type Transaction = TransactionFor; fn import_block( &mut self, @@ -151,23 +149,22 @@ impl BlockImport } } -impl FinalityProofImport - for GrandpaLightBlockImport where +impl FinalityProofImport + for GrandpaLightBlockImport where NumberFor: finality_grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, DigestFor: Encode, - RA: Send + Sync, - for<'a> &'a Client: - BlockImport> - + Finalizer + BE: Backend + 'static, + for<'a> &'a Client: + HeaderBackend + + BlockImport> + + Finalizer + AuxStore, { type Error = ConsensusError; fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { let mut out = Vec::new(); - let chain_info = self.client.chain_info(); + let chain_info = (&*self.client).info(); let data = self.data.read(); for (pending_number, pending_hash) in data.consensus_changes.pending_changes() { @@ -567,7 +564,7 @@ fn on_post_finalization_error(error: ClientError, value_type: &str) -> Consensus #[cfg(test)] pub mod tests { use super::*; - use sp_consensus::ForkChoiceStrategy; + use sp_consensus::{ForkChoiceStrategy, BlockImport}; use sp_finality_grandpa::AuthorityId; use sp_core::{H256, crypto::Public}; use substrate_test_runtime_client::sc_client::in_mem::Blockchain as InMemoryAuxStore; @@ -575,37 +572,36 @@ pub mod tests { use crate::tests::TestApi; use crate::finality_proof::tests::TestJustification; - pub struct NoJustificationsImport( - pub GrandpaLightBlockImport + pub struct NoJustificationsImport( + pub GrandpaLightBlockImport ); - impl Clone - for NoJustificationsImport where + impl Clone + for NoJustificationsImport where NumberFor: finality_grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, DigestFor: Encode, - RA: Send + Sync, + BE: Backend + 'static, { fn clone(&self) -> Self { NoJustificationsImport(self.0.clone()) } } - impl BlockImport - for NoJustificationsImport where + impl BlockImport + for NoJustificationsImport where NumberFor: finality_grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, DigestFor: Encode, - RA: Send + Sync, - for<'a> &'a Client: - BlockImport> - + Finalizer + BE: Backend + 'static, + for <'a > &'a Client: + HeaderBackend + + BlockImport> + + Finalizer + AuxStore, + GrandpaLightBlockImport: + BlockImport, Error = ConsensusError> { type Error = ConsensusError; - type Transaction = TransactionFor; + type Transaction = TransactionFor; fn import_block( &mut self, @@ -624,16 +620,15 @@ pub mod tests { } } - impl FinalityProofImport - for NoJustificationsImport where + impl FinalityProofImport + for NoJustificationsImport where NumberFor: finality_grandpa::BlockNumberOps, - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, + BE: Backend + 'static, DigestFor: Encode, - RA: Send + Sync, - for<'a> &'a Client: - BlockImport> - + Finalizer + for <'a > 
&'a Client: + HeaderBackend + + BlockImport> + + Finalizer + AuxStore, { type Error = ConsensusError; @@ -654,19 +649,15 @@ pub mod tests { } /// Creates light block import that ignores justifications that came outside of finality proofs. - pub fn light_block_import_without_justifications( - client: Arc>, - backend: Arc, + pub fn light_block_import_without_justifications( + client: Arc, + backend: Arc, genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, authority_set_provider: Arc>, - ) -> Result, ClientError> + ) -> Result, ClientError> where - B: Backend + 'static, - E: CallExecutor + 'static + Clone + Send + Sync, - RA: Send + Sync, - Client: BlockImport - + Finalizer - + AuxStore, + BE: Backend + 'static, + Client: crate::ClientForGrandpa, { light_block_import(client, backend, genesis_authorities_provider, authority_set_provider) .map(NoJustificationsImport) @@ -677,6 +668,7 @@ pub mod tests { justification: Option, ) -> ImportResult { let (client, _backend) = substrate_test_runtime_client::new_light(); + let client = Arc::new(client); let mut import_data = LightImportData { last_finalized: Default::default(), authority_set: LightAuthoritySet::genesis(vec![(AuthorityId::from_slice(&[1; 32]), 1)]), @@ -696,7 +688,7 @@ pub mod tests { block.fork_choice = Some(ForkChoiceStrategy::LongestChain); do_import_block::<_, _, _, TestJustification>( - &client, + &*client, &mut import_data, block, new_cache, diff --git a/client/finality-grandpa/src/observer.rs b/client/finality-grandpa/src/observer.rs index 77227909dc856b0ec0a99d04fefeacd20ad31ce3..921e5a3dd5b1453846a606b633f9e228e4017232 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -26,10 +26,9 @@ use finality_grandpa::{ use log::{debug, info, warn}; use sp_consensus::SelectChain; -use sc_client_api::{CallExecutor, backend::{Backend, AuxStore}}; -use sc_client::Client; +use sc_client_api::backend::Backend; use sp_runtime::traits::{NumberFor, Block as BlockT}; - +use sp_blockchain::HeaderMetadata; use crate::{ global_communication, CommandOrError, CommunicationIn, Config, environment, LinkHalf, Error, aux_schema::PersistentData, VoterCommand, VoterSetState, @@ -38,17 +37,21 @@ use crate::authorities::SharedAuthoritySet; use crate::communication::{Network as NetworkT, NetworkBridge}; use crate::consensus_changes::SharedConsensusChanges; use sp_finality_grandpa::AuthorityId; +use std::marker::{PhantomData, Unpin}; -struct ObserverChain<'a, Block: BlockT, B, E, RA>(&'a Client); +struct ObserverChain<'a, Block: BlockT, Client> { + client: &'a Arc, + _phantom: PhantomData, +} -impl<'a, Block: BlockT, B, E, RA> finality_grandpa::Chain> - for ObserverChain<'a, Block, B, E, RA> where - B: Backend, - E: CallExecutor, +impl<'a, Block, Client> finality_grandpa::Chain> + for ObserverChain<'a, Block, Client> where + Block: BlockT, + Client: HeaderMetadata, NumberFor: BlockNumberOps, { fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { - environment::ancestry(&self.0, base, block) + environment::ancestry(&self.client, base, block) } fn best_chain_containing(&self, _block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { @@ -57,8 +60,8 @@ impl<'a, Block: BlockT, B, E, RA> finality_grandpa::Chain( - client: &Arc>, +fn grandpa_observer( + client: &Arc, authority_set: &SharedAuthoritySet>, consensus_changes: &SharedConsensusChanges>, voters: &Arc>, @@ -67,13 +70,12 @@ fn grandpa_observer( note_round: F, ) -> impl Future>>> where NumberFor: BlockNumberOps, - B: 
Backend, - E: CallExecutor + Send + Sync + 'static, - RA: Send + Sync, S: Stream< Item = Result, CommandOrError>>, >, F: Fn(u64), + BE: Backend, + Client: crate::ClientForGrandpa, { let authority_set = authority_set.clone(); let consensus_changes = consensus_changes.clone(); @@ -101,7 +103,7 @@ fn grandpa_observer( let validation_result = match finality_grandpa::validate_commit( &commit, &voters, - &ObserverChain(&*client), + &ObserverChain { client: &client, _phantom: PhantomData }, ) { Ok(r) => r, Err(e) => return future::err(e.into()), @@ -113,7 +115,7 @@ fn grandpa_observer( // commit is valid, finalize the block it targets match environment::finalize_block( - &client, + client.clone(), &authority_set, &consensus_changes, None, @@ -150,19 +152,20 @@ fn grandpa_observer( /// listening for and validating GRANDPA commits instead of following the full /// protocol. Provide configuration and a link to a block import worker that has /// already been instantiated with `block_import`. -pub fn run_grandpa_observer( +/// NOTE: this is currently not part of the crate's public API since we don't consider +/// it stable enough to use on a live network. +#[allow(unused)] +pub fn run_grandpa_observer( config: Config, - link: LinkHalf, + link: LinkHalf, network: N, - on_exit: impl futures::Future + Clone + Send + Unpin + 'static, -) -> sp_blockchain::Result + Unpin + Send + 'static> where - B: Backend + 'static, - E: CallExecutor + Send + Sync + 'static, +) -> sp_blockchain::Result + Unpin + Send + 'static> +where + BE: Backend + Unpin + 'static, N: NetworkT + Send + Clone + 'static, SC: SelectChain + 'static, NumberFor: BlockNumberOps, - RA: Send + Sync + 'static, - Client: AuxStore, + Client: crate::ClientForGrandpa + 'static, { let LinkHalf { client, @@ -191,33 +194,32 @@ pub fn run_grandpa_observer( warn!("GRANDPA Observer failed: {:?}", e); }); - Ok(future::select(observer_work, on_exit).map(drop)) + Ok(observer_work.map(drop)) } /// Future that powers the observer. 
#[must_use] -struct ObserverWork, E, Backend, RA> { +struct ObserverWork> { observer: Pin>>> + Send>>, - client: Arc>, + client: Arc, network: NetworkBridge, persistent_data: PersistentData, keystore: Option, voter_commands_rx: mpsc::UnboundedReceiver>>, + _phantom: PhantomData, } -impl ObserverWork +impl ObserverWork where B: BlockT, - N: NetworkT, + BE: Backend + 'static, + Client: crate::ClientForGrandpa + 'static, + Network: NetworkT, NumberFor: BlockNumberOps, - RA: 'static + Send + Sync, - E: CallExecutor + Send + Sync + 'static, - Bk: Backend + 'static, - Client: AuxStore, { fn new( - client: Arc>, - network: NetworkBridge, + client: Arc, + network: NetworkBridge, persistent_data: PersistentData, keystore: Option, voter_commands_rx: mpsc::UnboundedReceiver>>, @@ -232,6 +234,7 @@ where persistent_data, keystore, voter_commands_rx, + _phantom: PhantomData, }; work.rebuild_observer(); work @@ -248,12 +251,12 @@ where let (global_in, _) = global_communication( set_id, &voters, - &self.client, + self.client.clone(), &self.network, &self.keystore, ); - let last_finalized_number = self.client.chain_info().finalized_number; + let last_finalized_number = self.client.info().finalized_number; // NOTE: since we are not using `round_communication` we have to // manually note the round with the gossip validator, otherwise we won't @@ -321,15 +324,13 @@ where } } -impl Future for ObserverWork +impl Future for ObserverWork where B: BlockT, + BE: Backend + Unpin + 'static, + C: crate::ClientForGrandpa + 'static, N: NetworkT, NumberFor: BlockNumberOps, - RA: 'static + Send + Sync, - E: CallExecutor + Send + Sync + 'static, - Bk: Backend + 'static, - Client: AuxStore, { type Output = Result<(), Error>; @@ -376,6 +377,7 @@ mod tests { use crate::{aux_schema, communication::tests::{Event, make_test_network}}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; use sc_network::PeerId; + use sp_blockchain::HeaderBackend as _; use futures::executor; @@ -403,7 +405,7 @@ mod tests { let persistent_data = aux_schema::load_persistent( &*backend, - client.chain_info().genesis_hash, + client.info().genesis_hash, 0, || Ok(vec![]), ).unwrap(); diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index 9b9063f2c17e973246d58a968dc286120a665224..0774194d7eb5200d21222e754b71b4aa78b52dc2 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -19,13 +19,13 @@ use super::*; use environment::HasVoted; use sc_network_test::{ - Block, DummySpecialization, Hash, TestNetFactory, BlockImportAdapter, Peer, - PeersClient, PassThroughVerifier, + Block, Hash, TestNetFactory, BlockImportAdapter, Peer, + PeersClient, PassThroughVerifier, PeersFullClient, }; use sc_network::config::{ProtocolConfig, Roles, BoxFinalityProofRequestBuilder}; use parking_lot::Mutex; use futures_timer::Delay; -use tokio::runtime::current_thread; +use tokio::runtime::{Runtime, Handle}; use sp_keyring::Ed25519Keyring; use sc_client::LongestChain; use sc_client_api::backend::TransactionFor; @@ -39,36 +39,33 @@ use sp_consensus::{ use std::{ collections::{HashMap, HashSet}, result, - pin::Pin, task, + pin::Pin, }; use parity_scale_codec::Decode; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HasherFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HashFor}; use sp_runtime::generic::{BlockId, DigestItem}; use sp_core::{H256, NativeOrEncoded, ExecutionContext, crypto::Public}; use sp_finality_grandpa::{GRANDPA_ENGINE_ID, 
AuthorityList, GrandpaApi}; use sp_state_machine::{InMemoryBackend, prove_read, read_proof_check}; -use futures01::Async; -use futures::compat::Future01CompatExt; use authorities::AuthoritySet; use finality_proof::{ FinalityProofProvider, AuthoritySetForFinalityProver, AuthoritySetForFinalityChecker, }; use consensus_changes::ConsensusChanges; +use sc_block_builder::BlockBuilderProvider; type PeerData = Mutex< Option< LinkHalf< - substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, Block, - substrate_test_runtime_client::runtime::RuntimeApi, + PeersFullClient, LongestChain > > >; -type GrandpaPeer = Peer; +type GrandpaPeer = Peer; struct GrandpaTestNet { peers: Vec, @@ -90,7 +87,6 @@ impl GrandpaTestNet { } impl TestNetFactory for GrandpaTestNet { - type Specialization = DummySpecialization; type Verifier = PassThroughVerifier; type PeerData = PeerData; @@ -171,11 +167,10 @@ impl TestNetFactory for GrandpaTestNet { fn make_finality_proof_provider( &self, client: PeersClient - ) -> Option>> { + ) -> Option>> { match client { PeersClient::Full(_, ref backend) => { - let authorities_provider = Arc::new(self.test_config.clone()); - Some(Arc::new(FinalityProofProvider::new(backend.clone(), authorities_provider))) + Some(Arc::new(FinalityProofProvider::new(backend.clone(), self.test_config.clone()))) }, PeersClient::Light(_, _) => None, } @@ -194,17 +189,6 @@ impl TestNetFactory for GrandpaTestNet { } } -#[derive(Clone)] -struct Exit; - -impl futures::Future for Exit { - type Output = (); - - fn poll(self: Pin<&mut Self>, _: &mut task::Context) -> task::Poll<()> { - task::Poll::Pending - } -} - #[derive(Default, Clone)] pub(crate) struct TestApi { genesis_authorities: AuthorityList, @@ -293,7 +277,7 @@ impl ApiExt for RuntimeApi { fn into_storage_changes( &self, _: &Self::StateBackend, - _: Option<&sp_api::ChangesTrieState, sp_api::NumberFor>>, + _: Option<&sp_api::ChangesTrieState, sp_api::NumberFor>>, _: ::Hash, ) -> std::result::Result, String> where Self: Sized @@ -327,7 +311,7 @@ impl AuthoritySetForFinalityProver for TestApi { fn prove_authorities(&self, block: &BlockId) -> Result { let authorities = self.authorities(block)?; - let backend = >>::from(vec![ + let backend = >>::from(vec![ (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) ]); let proof = prove_read(backend, vec![b"authorities"]) @@ -343,7 +327,7 @@ impl AuthoritySetForFinalityChecker for TestApi { header: ::Header, proof: StorageProof, ) -> Result { - let results = read_proof_check::, _>( + let results = read_proof_check::, _>( *header.state_root(), proof, vec![b"authorities"] ) .expect("failure checking read proof for authorities"); @@ -372,27 +356,25 @@ fn create_keystore(authority: Ed25519Keyring) -> (KeyStorePtr, tempfile::TempDir (keystore, keystore_path) } -fn block_until_complete(future: impl Future + Unpin, net: &Arc>, runtime: &mut current_thread::Runtime) { - let drive_to_completion = futures01::future::poll_fn(|| { - net.lock().poll(); Ok::, ()>(Async::NotReady) +fn block_until_complete(future: impl Future + Unpin, net: &Arc>, runtime: &mut Runtime) { + let drive_to_completion = futures::future::poll_fn(|cx| { + net.lock().poll(cx); Poll::<()>::Pending }); runtime.block_on( - future::select(future, drive_to_completion.compat()) - .map(|_| Ok::<(), ()>(())) - .compat() - ).unwrap(); + future::select(future, drive_to_completion) + ); } // run the voters to completion. provide a closure to be invoked after // the voters are spawned but before blocking on them. 
fn run_to_completion_with( - runtime: &mut current_thread::Runtime, + runtime: &mut Runtime, blocks: u64, net: Arc>, peers: &[Ed25519Keyring], with: F, ) -> u64 where - F: FnOnce(current_thread::Handle) -> Option>>> + F: FnOnce(Handle) -> Option>>> { use parking_lot::RwLock; @@ -400,7 +382,7 @@ fn run_to_completion_with( let highest_finalized = Arc::new(RwLock::new(0)); - if let Some(f) = (with)(runtime.handle()) { + if let Some(f) = (with)(runtime.handle().clone()) { wait_for.push(f); }; @@ -450,15 +432,15 @@ fn run_to_completion_with( link: link, network: net_service, inherent_data_providers: InherentDataProviders::new(), - on_exit: Exit, telemetry_on_connect: None, voting_rule: (), + prometheus_registry: None, }; let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); assert_send(&voter); - runtime.spawn(voter.unit_error().compat()); + runtime.spawn(voter); } // wait for all finalized on each. @@ -470,7 +452,7 @@ fn run_to_completion_with( } fn run_to_completion( - runtime: &mut current_thread::Runtime, + runtime: &mut Runtime, blocks: u64, net: Arc>, peers: &[Ed25519Keyring] @@ -499,13 +481,13 @@ fn add_forced_change( #[test] fn finalize_3_voters_no_observers() { let _ = env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); net.peer(0).push_blocks(20, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); for i in 0..3 { assert_eq!(net.peer(i).client().info().best_number, 20, @@ -524,14 +506,14 @@ fn finalize_3_voters_no_observers() { #[test] fn finalize_3_voters_1_full_observer() { - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); net.peer(0).push_blocks(20, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let net = Arc::new(Mutex::new(net)); let mut finality_notifications = Vec::new(); @@ -581,16 +563,16 @@ fn finalize_3_voters_1_full_observer() { link: link, network: net_service, inherent_data_providers: InherentDataProviders::new(), - on_exit: Exit, telemetry_on_connect: None, voting_rule: (), + prometheus_registry: None, }; voters.push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); } for voter in voters { - runtime.spawn(voter.unit_error().compat()); + runtime.spawn(voter); } // wait for all finalized on each. 
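The updated helpers above drive the test network with a plain futures-0.3 `poll_fn` that keeps returning `Pending`, racing it against the future under test with `future::select` on a threaded tokio `Runtime`, instead of the old futures-0.1 `current_thread` compat path. A self-contained sketch of that driving pattern, assuming the `futures` and `tokio` crates and using a trivial stand-in for the network:

use std::task::Poll;
use futures::future::{self, poll_fn};

// Trivial stand-in for the shared test network.
struct FakeNet;

impl FakeNet {
    fn poll(&mut self, _cx: &mut std::task::Context<'_>) {
        // In the real tests this advances every peer's network state machine.
        println!("network polled");
    }
}

fn main() {
    // Threaded runtime replaces the old `current_thread::Runtime`.
    let mut runtime = tokio::runtime::Runtime::new().unwrap();
    let mut net = FakeNet;

    // The future under test; the real tests wait on finality notifications.
    let main_future = future::ready(());

    // Never-ready side future that keeps the network polled on every wakeup.
    let drive_to_completion = poll_fn(|cx| { net.poll(cx); Poll::<()>::Pending });

    // `select` resolves as soon as `main_future` does and drops the driver.
    runtime.block_on(future::select(main_future, drive_to_completion));
}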
@@ -628,10 +610,10 @@ fn transition_3_voters_twice_1_full_observer() { let api = TestApi::new(genesis_voters); let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); net.lock().peer(0).push_blocks(1, false); - net.lock().block_until_sync(&mut runtime); + net.lock().block_until_sync(); for (i, peer) in net.lock().peers().iter().enumerate() { let full_client = peer.client().as_full().expect("only full clients are used in test"); @@ -691,7 +673,7 @@ fn transition_3_voters_twice_1_full_observer() { future::ready(()) }); - runtime.spawn(block_production.unit_error().compat()); + runtime.spawn(block_production); } let mut finality_notifications = Vec::new(); @@ -744,13 +726,13 @@ fn transition_3_voters_twice_1_full_observer() { link: link, network: net_service, inherent_data_providers: InherentDataProviders::new(), - on_exit: Exit, telemetry_on_connect: None, voting_rule: (), + prometheus_registry: None, }; let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); - runtime.spawn(voter.unit_error().compat()); + runtime.spawn(voter); } // wait for all finalized on each. @@ -761,14 +743,14 @@ fn transition_3_voters_twice_1_full_observer() { #[test] fn justification_is_emitted_when_consensus_data_changes() { - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); // import block#1 WITH consensus data change let new_authorities = vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]; net.peer(0).push_authorities_change_block(new_authorities); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let net = Arc::new(Mutex::new(net)); run_to_completion(&mut runtime, 1, net.clone(), peers); @@ -779,13 +761,13 @@ fn justification_is_emitted_when_consensus_data_changes() { #[test] fn justification_is_generated_periodically() { - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); net.peer(0).push_blocks(32, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let net = Arc::new(Mutex::new(net)); run_to_completion(&mut runtime, 32, net.clone(), peers); @@ -818,7 +800,7 @@ fn consensus_changes_works() { #[test] fn sync_justifications_on_change_blocks() { - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers_b); @@ -842,7 +824,7 @@ fn sync_justifications_on_change_blocks() { // add more blocks on top of it (until we have 25) net.peer(0).push_blocks(4, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); for i in 0..4 { assert_eq!(net.peer(i).client().info().best_number, 25, @@ -859,9 +841,9 @@ fn sync_justifications_on_change_blocks() { } // the last peer should get the justification by syncing from other peers - futures::executor::block_on(futures::future::poll_fn(move |_| { + futures::executor::block_on(futures::future::poll_fn(move |cx| { if 
net.lock().peer(3).client().justification(&BlockId::Number(21)).unwrap().is_none() { - net.lock().poll(); + net.lock().poll(cx); Poll::Pending } else { Poll::Ready(()) @@ -872,7 +854,7 @@ fn sync_justifications_on_change_blocks() { #[test] fn finalizes_multiple_pending_changes_in_order() { let _ = env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; let peers_b = &[Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie]; @@ -917,7 +899,7 @@ fn finalizes_multiple_pending_changes_in_order() { // add more blocks on top of it (until we have 30) net.peer(0).push_blocks(4, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); // all peers imported both change blocks for i in 0..6 { @@ -932,7 +914,7 @@ fn finalizes_multiple_pending_changes_in_order() { #[test] fn force_change_to_new_set() { let _ = env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); // two of these guys are offline. let genesis_authorities = &[ Ed25519Keyring::Alice, @@ -967,7 +949,7 @@ fn force_change_to_new_set() { }); net.lock().peer(0).push_blocks(25, false); - net.lock().block_until_sync(&mut runtime); + net.lock().block_until_sync(); for (i, peer) in net.lock().peers().iter().enumerate() { assert_eq!(peer.client().info().best_number, 26, @@ -1095,7 +1077,7 @@ fn voter_persists_its_votes() { use futures::channel::mpsc; let _ = env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); // we have two authorities but we'll only be running the voter for alice // we are going to be listening for the prevotes it casts @@ -1105,7 +1087,7 @@ fn voter_persists_its_votes() { // alice has a chain with 20 blocks let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2); net.peer(0).push_blocks(20, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); assert_eq!(net.peer(0).client().info().best_number, 20, "Peer #{} failed to sync", 0); @@ -1169,9 +1151,9 @@ fn voter_persists_its_votes() { link, network: this.net.lock().peers[0].network_service().clone(), inherent_data_providers: InherentDataProviders::new(), - on_exit: Exit, telemetry_on_connect: None, voting_rule: VotingRulesBuilder::default().build(), + prometheus_registry: None, }; let voter = run_grandpa_voter(grandpa_params) @@ -1203,7 +1185,7 @@ fn voter_persists_its_votes() { net: net.clone(), client: client.clone(), keystore, - }.unit_error().compat()); + }); } let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); @@ -1248,10 +1230,7 @@ fn voter_persists_its_votes() { HasVoted::No, ); - runtime.spawn( - network.map_err(|e| panic!("network bridge should not error: {:?}", e)) - .compat(), - ); + runtime.spawn(network); let round_tx = Arc::new(Mutex::new(round_tx)); let exit_tx = Arc::new(Mutex::new(Some(exit_tx))); @@ -1343,7 +1322,7 @@ fn voter_persists_its_votes() { panic!() } } - }).map(Ok).boxed().compat()); + })); } block_until_complete(exit_rx.into_future(), &net, &mut runtime); @@ -1352,13 +1331,13 @@ fn voter_persists_its_votes() { #[test] fn finalize_3_voters_1_light_observer() { let _ = env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, 
Ed25519Keyring::Charlie]; let voters = make_ids(authorities); let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); net.peer(0).push_blocks(20, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); for i in 0..4 { assert_eq!(net.peer(i).client().info().best_number, 20, @@ -1376,7 +1355,7 @@ fn finalize_3_voters_1_light_observer() { run_to_completion_with(&mut runtime, 20, net.clone(), authorities, |executor| { executor.spawn( - run_grandpa_observer( + observer::run_grandpa_observer( Config { gossip_duration: TEST_GOSSIP_DURATION, justification_period: 32, @@ -1387,9 +1366,8 @@ fn finalize_3_voters_1_light_observer() { }, link, net.lock().peers[3].network_service().clone(), - Exit, - ).unwrap().unit_error().compat() - ).unwrap(); + ).unwrap() + ); Some(Box::pin(finality_notifications.map(|_| ()))) }); @@ -1398,7 +1376,7 @@ fn finalize_3_voters_1_light_observer() { #[test] fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice]; let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); @@ -1409,18 +1387,17 @@ fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { net.peer(0).push_authorities_change_block(vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]); let net = Arc::new(Mutex::new(net)); run_to_completion(&mut runtime, 1, net.clone(), peers); - net.lock().block_until_sync(&mut runtime); + net.lock().block_until_sync(); // check that the block#1 is finalized on light client - let mut runtime = current_thread::Runtime::new().unwrap(); - let _ = runtime.block_on(futures::future::poll_fn(move |_| { + runtime.block_on(futures::future::poll_fn(move |cx| { if net.lock().peer(1).client().info().finalized_number == 1 { Poll::Ready(()) } else { - net.lock().poll(); + net.lock().poll(cx); Poll::Pending } - }).unit_error().compat()); + })); } #[test] @@ -1429,7 +1406,7 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ const FORCE_CHANGE: bool = true; let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); // two of these guys are offline. 
let genesis_authorities = if FORCE_CHANGE { @@ -1474,14 +1451,14 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])] ); // #10 net.lock().peer(0).push_blocks(1, false); // best is #11 - net.lock().block_until_sync(&mut runtime); + net.lock().block_until_sync(); // finalize block #11 on full clients run_to_completion(&mut runtime, 11, net.clone(), peers_a); // request finalization by light client net.lock().add_light_peer(&GrandpaTestNet::default_config()); - net.lock().block_until_sync(&mut runtime); + net.lock().block_until_sync(); // check block, finalized on light client assert_eq!( @@ -1493,14 +1470,14 @@ fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_differ #[test] fn voter_catches_up_to_latest_round_when_behind() { let _ = env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); + let mut runtime = Runtime::new().unwrap(); let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; let voters = make_ids(peers); let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); net.peer(0).push_blocks(50, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let net = Arc::new(Mutex::new(net)); let mut finality_notifications = Vec::new(); @@ -1518,9 +1495,9 @@ fn voter_catches_up_to_latest_round_when_behind() { link, network: net.lock().peer(peer_id).network_service().clone(), inherent_data_providers: InherentDataProviders::new(), - on_exit: Exit, telemetry_on_connect: None, voting_rule: (), + prometheus_registry: None, }; Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network")) @@ -1550,19 +1527,18 @@ fn voter_catches_up_to_latest_round_when_behind() { let voter = voter(Some(keystore), peer_id, link, net.clone()); - runtime.spawn(voter.unit_error().compat()); + runtime.spawn(voter); } // wait for them to finalize block 50. since they'll vote on 3/4 of the // unfinalized chain it will take at least 4 rounds to do it. 
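[Editor's note] The catch-up test just below races its test future against a driver that never completes on its own, using `future::select` instead of the old `futures01::future::poll_fn` + `.compat()` combination. A standalone sketch of that race, with futures' single-threaded executor standing in for the tokio runtime used in the tests and a trivial `42` payload as the "test" result:

```rust
use futures::executor::block_on;
use futures::future::{self, poll_fn};
use std::task::Poll;

fn main() {
    // The actual work/assertions of the test would live here.
    let test = Box::pin(async { 42u32 });

    // A driver that never resolves; in the real test it forwards the waker
    // to the simulated network via `net.lock().poll(cx)`.
    let drive_to_completion = poll_fn(|cx: &mut std::task::Context<'_>| {
        cx.waker().wake_by_ref(); // stand-in for the network making progress
        Poll::<()>::Pending
    });

    // `select` finishes as soon as either side does, so the test future
    // decides when the whole run ends.
    match block_on(future::select(test, drive_to_completion)) {
        future::Either::Left((value, _)) => assert_eq!(value, 42),
        future::Either::Right(_) => unreachable!("driver never completes"),
    }
}
```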
- let wait_for_finality = ::futures::future::join_all(finality_notifications) - .map(|_| ()); + let wait_for_finality = ::futures::future::join_all(finality_notifications); // spawn a new voter, it should be behind by at least 4 rounds and should be // able to catch up to the latest round let test = { let net = net.clone(); - let runtime = runtime.handle(); + let runtime = runtime.handle().clone(); wait_for_finality.then(move |_| { let peer_id = 2; @@ -1576,7 +1552,7 @@ fn voter_catches_up_to_latest_round_when_behind() { let voter = voter(None, peer_id, link, net); - runtime.spawn(voter.unit_error().compat()).unwrap(); + runtime.spawn(voter); let start_time = std::time::Instant::now(); let timeout = Duration::from_secs(5 * 60); @@ -1597,14 +1573,12 @@ fn voter_catches_up_to_latest_round_when_behind() { }) }; - let drive_to_completion = futures01::future::poll_fn(|| { - net.lock().poll(); Ok::, ()>(Async::NotReady) + let drive_to_completion = futures::future::poll_fn(|cx| { + net.lock().poll(cx); Poll::<()>::Pending }); runtime.block_on( - future::select(test, drive_to_completion.compat()) - .map(|_| Ok::<(), ()>(())) - .compat() - ).unwrap(); + future::select(test, drive_to_completion) + ); } #[test] @@ -1655,6 +1629,8 @@ fn grandpa_environment_respects_voting_rules() { voters: Arc::new(authority_set.current_authorities()), network, voting_rule, + metrics: None, + _phantom: PhantomData, } }; diff --git a/client/informant/Cargo.toml b/client/informant/Cargo.toml index 197f320889c0d556bc475d1fb3f410ce31e6c9b4..a29d793c3601b46b9e902fdde25cab18dfb8fc4b 100644 --- a/client/informant/Cargo.toml +++ b/client/informant/Cargo.toml @@ -1,10 +1,12 @@ [package] name = "sc-informant" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Substrate informant." 
edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] ansi_term = "0.12.1" @@ -12,8 +14,8 @@ futures = "0.3.1" log = "0.4.8" parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } wasm-timer = "0.2" -sc-client-api = { version = "2.0.0", path = "../api" } -sc-network = { version = "0.8", path = "../network" } -sc-service = { version = "0.8", default-features = false, path = "../service" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } +sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../service" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 699dcfdd7425e2b7b9e0f82eba481df2e81bf951..d104a64a2dbd121a00d27dde8de80f61fee0c12d 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -46,7 +46,10 @@ pub fn build(service: &impl AbstractService, format: OutputFormat) -> impl futur if let Some(ref usage) = info.usage { trace!(target: "usage", "Usage statistics: {}", usage); } else { - trace!(target: "usage", "Usage statistics not displayed as backend does not provide it") + trace!( + target: "usage", + "Usage statistics not displayed as backend does not provide it", + ) } #[cfg(not(target_os = "unknown"))] trace!( diff --git a/client/keystore/Cargo.toml b/client/keystore/Cargo.toml index 89ab787cb4bdc9e9c4fd68c12f79dd0aef63f997..247376bc46e17952cf6c1551becb4324cba90ba7 100644 --- a/client/keystore/Cargo.toml +++ b/client/keystore/Cargo.toml @@ -1,14 +1,19 @@ [package] name = "sc-keystore" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Keystore (and session key management) for ed25519 based chains like Polkadot." 
+documentation = "https://docs.rs/sc-keystore" + [dependencies] derive_more = "0.99.2" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../primitives/application-crypto" } hex = "0.4.0" rand = "0.7.2" serde_json = "1.0.41" diff --git a/client/network-gossip/Cargo.toml b/client/network-gossip/Cargo.toml index 98b2bd0590af94aeea4a8f87b0c14b47a7bca38b..08c304c6e0614edad4bfa0cd8902176a5a4c1c10 100644 --- a/client/network-gossip/Cargo.toml +++ b/client/network-gossip/Cargo.toml @@ -1,19 +1,22 @@ [package] description = "Gossiping for the Substrate network protocol" name = "sc-network-gossip" -version = "0.8.0" +version = "0.8.0-alpha.3" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-network-gossip" + [dependencies] -log = "0.4.8" -futures = { version = "0.3.1", features = ["compat"] } -wasm-timer = "0.2" +futures = "0.3.1" futures-timer = "3.0.1" -futures01 = { package = "futures", version = "0.1.29" } -libp2p = { version = "0.16.0", default-features = false, features = ["libp2p-websocket"] } -lru = "0.1.2" +libp2p = { version = "0.16.2", default-features = false, features = ["libp2p-websocket"] } +log = "0.4.8" +lru = "0.4.3" parking_lot = "0.10.0" -sc-network = { version = "0.8", path = "../network" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +wasm-timer = "0.2" diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index 87958cbc14563dab3c41e3d9e84a6941c8468447..c911766aba40a4f41b0588ae2c33a9a51d5daab4 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -20,12 +20,11 @@ use crate::state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENA use sc_network::message::generic::ConsensusMessage; use sc_network::{Event, ReputationChange}; -use futures::{prelude::*, channel::mpsc, compat::Compat01As03}; -use futures01::stream::Stream as Stream01; +use futures::{prelude::*, channel::mpsc}; use libp2p::PeerId; use parking_lot::Mutex; use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; -use std::{pin::Pin, sync::Arc, task::{Context, Poll}}; +use std::{borrow::Cow, pin::Pin, sync::Arc, task::{Context, Poll}}; /// Wraps around an implementation of the `Network` crate and provides gossiping capabilities on /// top of it. @@ -38,7 +37,7 @@ struct GossipEngineInner { state_machine: ConsensusGossip, network: Box + Send>, periodic_maintenance_interval: futures_timer::Delay, - network_event_stream: Compat01As03 + Send>>, + network_event_stream: Pin + Send>>, engine_id: ConsensusEngineId, } @@ -49,6 +48,7 @@ impl GossipEngine { pub fn new + Send + Clone + 'static>( mut network: N, engine_id: ConsensusEngineId, + protocol_name: impl Into>, validator: Arc>, ) -> Self where B: 'static { let mut state_machine = ConsensusGossip::new(); @@ -57,14 +57,14 @@ impl GossipEngine { // might miss events. 
let network_event_stream = network.event_stream(); - network.register_notifications_protocol(engine_id); + network.register_notifications_protocol(engine_id, protocol_name.into()); state_machine.register_validator(&mut network, engine_id, validator); let inner = Arc::new(Mutex::new(GossipEngineInner { state_machine, network: Box::new(network), periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), - network_event_stream: Compat01As03::new(network_event_stream), + network_event_stream, engine_id, })); @@ -178,7 +178,7 @@ impl Future for GossipEngineInner { fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { let this = &mut *self; - while let Poll::Ready(Some(Ok(event))) = this.network_event_stream.poll_next_unpin(cx) { + while let Poll::Ready(Some(event)) = this.network_event_stream.poll_next_unpin(cx) { match event { Event::NotificationStreamOpened { remote, engine_id: msg_engine_id, roles } => { if msg_engine_id != this.engine_id { diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 705a27210ac53de386aedd4c9d681467d9acda55..4e4d32366f29d4c907561fc844e107edc60b8627 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -59,9 +59,9 @@ pub use self::state_machine::TopicNotification; pub use self::validator::{DiscardAll, MessageIntent, Validator, ValidatorContext, ValidationResult}; use futures::prelude::*; -use sc_network::{specialization::NetworkSpecialization, Event, ExHashT, NetworkService, PeerId, ReputationChange}; +use sc_network::{Event, ExHashT, NetworkService, PeerId, ReputationChange}; use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; -use std::sync::Arc; +use std::{borrow::Cow, pin::Pin, sync::Arc}; mod bridge; mod state_machine; @@ -70,7 +70,7 @@ mod validator; /// Abstraction over a network. pub trait Network { /// Returns a stream of events representing what happens on the network. - fn event_stream(&self) -> Box + Send>; + fn event_stream(&self) -> Pin + Send>>; /// Adjust the reputation of a node. fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange); @@ -86,7 +86,8 @@ pub trait Network { /// See the documentation of [`NetworkService:register_notifications_protocol`] for more information. fn register_notifications_protocol( &self, - engine_id: ConsensusEngineId + engine_id: ConsensusEngineId, + protocol_name: Cow<'static, [u8]>, ); /// Notify everyone we're connected to that we have the given block. 
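[Editor's note] The `Network` trait's `event_stream` now returns a natively pinned, boxed futures 0.3 stream (`Pin<Box<dyn Stream<Item = Event> + Send>>`) instead of a boxed futures 0.1 stream behind `Compat01As03`. A minimal sketch of producing and consuming a value of that shape, with `u32` standing in for the real `Event` type:

```rust
use futures::executor::block_on;
use futures::stream::{self, Stream, StreamExt};
use std::pin::Pin;

// Same return shape as the new `event_stream`, with `u32` in place of `Event`.
fn event_stream() -> Pin<Box<dyn Stream<Item = u32> + Send>> {
    Box::pin(stream::iter(vec![1, 2, 3]))
}

fn main() {
    block_on(async {
        let mut events = event_stream();
        // The boxed stream is already pinned, so it can be polled directly
        // with `StreamExt::next`, no compat adapter required.
        while let Some(event) = events.next().await {
            println!("event: {}", event);
        }
    });
}
```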
@@ -96,9 +97,9 @@ pub trait Network { fn announce(&self, block: B::Hash, associated_data: Vec); } -impl, H: ExHashT> Network for Arc> { - fn event_stream(&self) -> Box + Send> { - Box::new(NetworkService::event_stream(self).map(|v| Ok::<_, ()>(v)).compat()) +impl Network for Arc> { + fn event_stream(&self) -> Pin + Send>> { + Box::pin(NetworkService::event_stream(self)) } fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange) { @@ -116,8 +117,9 @@ impl, H: ExHashT> Network for Arc, ) { - NetworkService::register_notifications_protocol(self, engine_id) + NetworkService::register_notifications_protocol(self, engine_id, protocol_name) } fn announce(&self, block: B::Hash, associated_data: Vec) { diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index 26433e63ec3ea7e09a6185628f4a5a3ec894e484..db5ea3603dcacea771f7bb1d458eac7b3c9bd9cc 100644 --- a/client/network-gossip/src/state_machine.rs +++ b/client/network-gossip/src/state_machine.rs @@ -258,6 +258,7 @@ impl ConsensusGossip { let mut context = NetworkContext { gossip: self, network, engine_id: engine_id.clone() }; v.peer_disconnected(&mut context, &who); } + self.peers.remove(&who); } /// Perform periodic maintenance @@ -644,4 +645,52 @@ mod tests { let _ = consensus.live_message_sinks.remove(&([0, 0, 0, 0], topic)); assert_eq!(stream.next(), None); } + + #[test] + fn peer_is_removed_on_disconnect() { + struct TestNetwork; + impl Network for TestNetwork { + fn event_stream( + &self, + ) -> std::pin::Pin + Send>> { + unimplemented!("Not required in tests") + } + + fn report_peer(&self, _: PeerId, _: crate::ReputationChange) { + unimplemented!("Not required in tests") + } + + fn disconnect_peer(&self, _: PeerId) { + unimplemented!("Not required in tests") + } + + fn write_notification(&self, _: PeerId, _: crate::ConsensusEngineId, _: Vec) { + unimplemented!("Not required in tests") + } + + fn register_notifications_protocol( + &self, + _: ConsensusEngineId, + _: std::borrow::Cow<'static, [u8]>, + ) { + unimplemented!("Not required in tests") + } + + fn announce(&self, _: H256, _: Vec) { + unimplemented!("Not required in tests") + } + } + + let mut consensus = ConsensusGossip::::new(); + consensus.register_validator_internal([0, 0, 0, 0], Arc::new(AllowAll)); + + let mut network = TestNetwork; + + let peer_id = PeerId::random(); + consensus.new_peer(&mut network, peer_id.clone(), Roles::FULL); + assert!(consensus.peers.contains_key(&peer_id)); + + consensus.peer_disconnected(&mut network, peer_id.clone()); + assert!(!consensus.peers.contains_key(&peer_id)); + } } diff --git a/client/network/Cargo.toml b/client/network/Cargo.toml index e53ba13c8d2e5861a0be48d3a6d76f472134b406..d99dce2fd2cdd6c78411e2cf412e4227a71aeafa 100644 --- a/client/network/Cargo.toml +++ b/client/network/Cargo.toml @@ -1,10 +1,14 @@ [package] description = "Substrate network protocol" name = "sc-network" -version = "0.8.0" +version = "0.8.0-alpha.3" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-network" + [build-dependencies] prost-build = "0.6.1" @@ -12,17 +16,17 @@ prost-build = "0.6.1" [dependencies] bitflags = "1.2.0" bytes = "0.5.0" -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } derive_more = "0.99.2" either = "1.5.3" erased-serde 
= "0.3.9" fnv = "1.0.6" -fork-tree = { version = "2.0.0", path = "../../utils/fork-tree" } +fork-tree = { version = "2.0.0-alpha.2", path = "../../utils/fork-tree" } futures = "0.3.1" futures_codec = "0.3.3" futures-timer = "3.0.1" wasm-timer = "0.2" -libp2p = { version = "0.16.0", default-features = false, features = ["libp2p-websocket"] } +libp2p = { version = "0.16.2", default-features = false, features = ["libp2p-websocket"] } linked-hash-map = "0.5.2" linked_hash_set = "0.1.3" log = "0.4.8" @@ -32,26 +36,25 @@ parking_lot = "0.10.0" prost = "0.6.1" rand = "0.7.2" rustc-hex = "2.0.1" -sc-block-builder = { version = "0.8", path = "../block-builder" } -sc-client = { version = "0.8", path = "../" } -sc-client-api = { version = "2.0.0", path = "../api" } -sc-peerset = { version = "2.0.0", path = "../peerset" } +sc-block-builder = { version = "0.8.0-alpha.2", path = "../block-builder" } +sc-client = { version = "0.8.0-alpha.2", path = "../" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sc-peerset = { version = "2.0.0-alpha.2", path = "../peerset" } +pin-project = "0.4.6" serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } slog_derive = "0.2.0" smallvec = "0.6.10" -sp-arithmetic = { version = "2.0.0", path = "../../primitives/arithmetic" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -sp-consensus-babe = { version = "0.8", path = "../../primitives/consensus/babe" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -substrate-test-client = { version = "2.0.0", optional = true, path = "../../test-utils/client" } -substrate-test-runtime-client = { version = "2.0.0", optional = true, path = "../../test-utils/runtime/client" } +sp-arithmetic = { version = "2.0.0-alpha.2", path = "../../primitives/arithmetic" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } +sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/babe" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", version = "0.8.0-alpha.2", path = "../../utils/prometheus" } thiserror = "1" -unsigned-varint = { version = "0.3.0", features = ["futures-codec"] } +unsigned-varint = { version = "0.3.1", features = ["futures", "futures-codec"] } void = "1.0.2" zeroize = "1.0.0" @@ -61,12 +64,12 @@ assert_matches = "1.3" env_logger = "0.7.0" quickcheck = "0.9.0" rand = "0.7.2" -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -sp-test-primitives = { version = "2.0.0", path = "../../primitives/test-primitives" } -substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +sp-test-primitives = { version = "2.0.0-dev", path = "../../primitives/test-primitives" } +substrate-test-runtime = { version = "2.0.0-dev", path = 
"../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } tempfile = "3.1.0" [features] default = [] -test-helpers = ["sp-keyring", "substrate-test-runtime-client"] + diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index c8c5e59fe62cc3bc34d00aa441194b5993f6cd69..e7aca1975cd0d03f450e23528137db6511b1490e 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -16,9 +16,8 @@ use crate::{ debug_info, discovery::DiscoveryBehaviour, discovery::DiscoveryOut, DiscoveryNetBehaviour, - Event, protocol::event::DhtEvent + Event, protocol::event::DhtEvent, ExHashT, }; -use crate::{ExHashT, specialization::NetworkSpecialization}; use crate::protocol::{self, light_client_handler, CustomMessageOutcome, Protocol}; use libp2p::NetworkBehaviour; use libp2p::core::{Multiaddr, PeerId, PublicKey}; @@ -33,9 +32,9 @@ use void; /// General behaviour of the network. Combines all protocols together. #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourOut", poll_method = "poll")] -pub struct Behaviour, H: ExHashT> { +pub struct Behaviour { /// All the substrate-specific protocols. - substrate: Protocol, + substrate: Protocol, /// Periodically pings and identifies the nodes we are connected to, and store information in a /// cache. debug_info: debug_info::DebugInfoBehaviour, @@ -55,13 +54,15 @@ pub enum BehaviourOut { BlockImport(BlockOrigin, Vec>), JustificationImport(Origin, B::Hash, NumberFor, Justification), FinalityProofImport(Origin, B::Hash, NumberFor, Vec), + /// Started a random Kademlia discovery query. + RandomKademliaStarted, Event(Event), } -impl, H: ExHashT> Behaviour { +impl Behaviour { /// Builds a new `Behaviour`. pub async fn new( - substrate: Protocol, + substrate: Protocol, user_agent: String, local_public_key: PublicKey, known_addresses: Vec<(PeerId, Multiaddr)>, @@ -97,6 +98,11 @@ impl, H: ExHashT> Behaviour { self.discovery.add_known_address(peer_id, addr) } + /// Returns the number of nodes that are in the Kademlia k-buckets. + pub fn num_kbuckets_entries(&mut self) -> usize { + self.discovery.num_kbuckets_entries() + } + /// Borrows `self` and returns a struct giving access to the information about a node. /// /// Returns `None` if we don't know anything about this node. Always returns `Some` for nodes @@ -107,12 +113,12 @@ impl, H: ExHashT> Behaviour { } /// Returns a shared reference to the user protocol. - pub fn user_protocol(&self) -> &Protocol { + pub fn user_protocol(&self) -> &Protocol { &self.substrate } /// Returns a mutable reference to the user protocol. 
- pub fn user_protocol_mut(&mut self) -> &mut Protocol { + pub fn user_protocol_mut(&mut self) -> &mut Protocol { &mut self.substrate } @@ -133,15 +139,15 @@ impl, H: ExHashT> Behaviour { } } -impl, H: ExHashT> NetworkBehaviourEventProcess for -Behaviour { +impl NetworkBehaviourEventProcess for +Behaviour { fn inject_event(&mut self, event: void::Void) { void::unreachable(event) } } -impl, H: ExHashT> NetworkBehaviourEventProcess> for -Behaviour { +impl NetworkBehaviourEventProcess> for +Behaviour { fn inject_event(&mut self, event: CustomMessageOutcome) { match event { CustomMessageOutcome::BlockImport(origin, blocks) => @@ -174,8 +180,8 @@ Behaviour { } } -impl, H: ExHashT> NetworkBehaviourEventProcess - for Behaviour { +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, event: debug_info::DebugInfoEvent) { let debug_info::DebugInfoEvent::Identified { peer_id, mut info } = event; if info.listen_addrs.len() > 30 { @@ -192,8 +198,8 @@ impl, H: ExHashT> NetworkBehaviourEventPr } } -impl, H: ExHashT> NetworkBehaviourEventProcess - for Behaviour { +impl NetworkBehaviourEventProcess + for Behaviour { fn inject_event(&mut self, out: DiscoveryOut) { match out { DiscoveryOut::UnroutablePeer(_peer_id) => { @@ -217,11 +223,14 @@ impl, H: ExHashT> NetworkBehaviourEventPr DiscoveryOut::ValuePutFailed(key) => { self.events.push(BehaviourOut::Event(Event::Dht(DhtEvent::ValuePutFailed(key)))); } + DiscoveryOut::RandomKademliaStarted => { + self.events.push(BehaviourOut::RandomKademliaStarted); + } } } } -impl, H: ExHashT> Behaviour { +impl Behaviour { fn poll(&mut self, _: &mut Context, _: &mut impl PollParameters) -> Poll>> { if !self.events.is_empty() { return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))) diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index b991a0e65208c2b8bee5c89162ddaec4755ef3f6..3c075ec881ca19395b719b58eaa20b68894c6106 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -18,7 +18,7 @@ use sc_client::Client as SubstrateClient; use sp_blockchain::{Error, Info as BlockchainInfo}; -use sc_client_api::{ChangesProof, StorageProof, CallExecutor}; +use sc_client_api::{ChangesProof, StorageProof, CallExecutor, ProofProvider}; use sp_consensus::{BlockImport, BlockStatus, Error as ConsensusError}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use sp_runtime::generic::{BlockId}; @@ -50,7 +50,11 @@ pub trait Client: Send + Sync { -> Result<(Block::Header, StorageProof), Error>; /// Get storage read execution proof. - fn read_proof(&self, block: &Block::Hash, keys: &[Vec]) -> Result; + fn read_proof( + &self, + block: &Block::Hash, + keys: &mut dyn Iterator, + ) -> Result; /// Get child storage read execution proof. fn read_child_proof( @@ -58,7 +62,7 @@ pub trait Client: Send + Sync { block: &Block::Hash, storage_key: &[u8], child_info: ChildInfo, - keys: &[Vec], + keys: &mut dyn Iterator, ) -> Result; /// Get method execution proof. 
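[Editor's note] The `Client` trait's `read_proof` and `read_child_proof` now borrow the keys through `&mut dyn Iterator<Item = &[u8]>` rather than a `&[Vec<u8>]` slice, and the call sites adapt with `keys.iter().map(AsRef::as_ref)`. A small sketch of that adaptation, with a trivial `count_key_bytes` standing in for the real proof routine:

```rust
// Stand-in for an API that, like the new `read_proof`, borrows the keys
// through a dynamic iterator of byte slices instead of `&[Vec<u8>]`.
fn count_key_bytes(keys: &mut dyn Iterator<Item = &[u8]>) -> usize {
    keys.map(|k| k.len()).sum()
}

fn main() {
    let keys: Vec<Vec<u8>> = vec![b"alice".to_vec(), b"bob".to_vec()];

    // Adapt owned keys to the iterator-of-slices shape, the same way the
    // call sites in this patch do with `AsRef::as_ref`.
    let total = count_key_bytes(&mut keys.iter().map(AsRef::as_ref));
    assert_eq!(total, 8);
}
```

The iterator form lets callers pass borrowed keys of any provenance without first collecting them into owned `Vec<u8>`s.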
@@ -125,14 +129,19 @@ impl Client for SubstrateClient where (self as &SubstrateClient).justification(id) } - fn header_proof(&self, block_number: ::Number) - -> Result<(Block::Header, StorageProof), Error> - { - (self as &SubstrateClient).header_proof(&BlockId::Number(block_number)) + fn header_proof( + &self, + block_number: ::Number, + )-> Result<(Block::Header, StorageProof), Error> { + ProofProvider::::header_proof(self, &BlockId::Number(block_number)) } - fn read_proof(&self, block: &Block::Hash, keys: &[Vec]) -> Result { - (self as &SubstrateClient).read_proof(&BlockId::Hash(block.clone()), keys) + fn read_proof( + &self, + block: &Block::Hash, + keys: &mut dyn Iterator, + ) -> Result { + ProofProvider::::read_proof(self, &BlockId::Hash(block.clone()), keys) } fn read_child_proof( @@ -140,10 +149,9 @@ impl Client for SubstrateClient where block: &Block::Hash, storage_key: &[u8], child_info: ChildInfo, - keys: &[Vec], + keys: &mut dyn Iterator, ) -> Result { - (self as &SubstrateClient) - .read_child_proof(&BlockId::Hash(block.clone()), storage_key, child_info, keys) + ProofProvider::::read_child_proof(self, &BlockId::Hash(block.clone()), storage_key, child_info, keys) } fn execution_proof( @@ -152,7 +160,8 @@ impl Client for SubstrateClient where method: &str, data: &[u8], ) -> Result<(Vec, StorageProof), Error> { - (self as &SubstrateClient).execution_proof( + ProofProvider::::execution_proof( + self, &BlockId::Hash(block.clone()), method, data, @@ -168,7 +177,7 @@ impl Client for SubstrateClient where storage_key: Option<&StorageKey>, key: &StorageKey, ) -> Result, Error> { - (self as &SubstrateClient).key_changes_proof(first, last, min, max, storage_key, key) + ProofProvider::::key_changes_proof(self, first, last, min, max, storage_key, key) } fn is_descendent_of(&self, base: &Block::Hash, block: &Block::Hash) -> Result { diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 2f920c6663955aad80c8525543c9fca490887b79..76c0c9b5440f75a9eadca73539186a051afa9cca 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -19,12 +19,18 @@ //! The [`Params`] struct is the struct that must be passed in order to initialize the networking. //! See the documentation of [`Params`]. -pub use crate::protocol::ProtocolConfig; +pub use crate::chain::{Client, FinalityProofProvider}; +pub use crate::on_demand_layer::OnDemand; +pub use crate::service::TransactionPool; pub use libp2p::{identity, core::PublicKey, wasm_ext::ExtTransport, build_multiaddr}; -use crate::chain::{Client, FinalityProofProvider}; -use crate::on_demand_layer::OnDemand; -use crate::service::{ExHashT, TransactionPool}; +// Note: this re-export shouldn't be part of the public API of the crate and will be removed in +// the future. +#[doc(hidden)] +pub use crate::protocol::ProtocolConfig; + +use crate::service::ExHashT; + use bitflags::bitflags; use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::ImportQueue}; use sp_runtime::traits::{Block as BlockT}; @@ -35,9 +41,10 @@ use core::{fmt, iter}; use std::{future::Future, pin::Pin}; use std::{error::Error, fs, io::{self, Write}, net::Ipv4Addr, path::{Path, PathBuf}, sync::Arc}; use zeroize::Zeroize; +use prometheus_endpoint::Registry; /// Network initialization parameters. -pub struct Params { +pub struct Params { /// Assigned roles for our node (full, light, ...). pub roles: Roles, @@ -82,11 +89,11 @@ pub struct Params { /// valid. pub import_queue: Box>, - /// Customization of the network. 
Use this to plug additional networking capabilities. - pub specialization: S, - /// Type to check incoming block announcements. pub block_announce_validator: Box + Send>, + + /// Registry for recording prometheus metrics to. + pub metrics_registry: Option, } bitflags! { @@ -120,6 +127,12 @@ impl Roles { } } +impl fmt::Display for Roles { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "{:?}", self) + } +} + impl codec::Encode for Roles { fn encode_to(&self, dest: &mut T) { dest.push_byte(self.bits()) @@ -241,9 +254,9 @@ impl From for ParseErr { #[derive(Clone, Debug)] pub struct NetworkConfiguration { /// Directory path to store general network configuration. None means nothing will be saved. - pub config_path: Option, + pub config_path: Option, /// Directory path to store network-specific configuration. None means nothing will be saved. - pub net_config_path: Option, + pub net_config_path: Option, /// Multiaddresses to listen for incoming connections. pub listen_addresses: Vec, /// Multiaddresses to advertise. Detected automatically if empty. diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index a236ceb1a8b6d8ba4e3981f83e7577fb06224d4e..ecce7d81e30cd62ed2856f03aed35eaa480ce215 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -176,6 +176,11 @@ impl DiscoveryBehaviour { pub fn put_value(&mut self, key: record::Key, value: Vec) { self.kademlia.put_record(Record::new(key, value), Quorum::All); } + + /// Returns the number of nodes that are in the Kademlia k-buckets. + pub fn num_kbuckets_entries(&mut self) -> usize { + self.kademlia.kbuckets_entries().count() + } } /// Event generated by the `DiscoveryBehaviour`. @@ -203,6 +208,9 @@ pub enum DiscoveryOut { /// Inserting a value into the DHT failed. ValuePutFailed(record::Key), + + /// Started a random Kademlia query. + RandomKademliaStarted, } impl NetworkBehaviour for DiscoveryBehaviour { @@ -330,25 +338,33 @@ impl NetworkBehaviour for DiscoveryBehaviour { // Poll the stream that fires when we need to start a random Kademlia query. while let Poll::Ready(_) = self.next_kad_random_query.poll_unpin(cx) { - if self.num_connections < self.discovery_only_if_under_num { + let actually_started = if self.num_connections < self.discovery_only_if_under_num { let random_peer_id = PeerId::random(); debug!(target: "sub-libp2p", "Libp2p <= Starting random Kademlia request for \ {:?}", random_peer_id); self.kademlia.get_closest_peers(random_peer_id); + true + } else { debug!( target: "sub-libp2p", "Kademlia paused due to high number of connections ({})", self.num_connections ); - } + false + }; // Schedule the next random query with exponentially increasing delay, // capped at 60 seconds. self.next_kad_random_query = Delay::new(self.duration_to_next_kad); self.duration_to_next_kad = cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); + + if actually_started { + let ev = DiscoveryOut::RandomKademliaStarted; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } } // Poll Kademlia. @@ -391,7 +407,14 @@ impl NetworkBehaviour for DiscoveryBehaviour { DiscoveryOut::ValueFound(results) } + Err(e @ libp2p::kad::GetRecordError::NotFound { .. 
}) => { + trace!(target: "sub-libp2p", + "Libp2p => Failed to get record: {:?}", e); + DiscoveryOut::ValueNotFound(e.into_key()) + } Err(e) => { + warn!(target: "sub-libp2p", + "Libp2p => Failed to get record: {:?}", e); DiscoveryOut::ValueNotFound(e.into_key()) } }; @@ -401,6 +424,8 @@ impl NetworkBehaviour for DiscoveryBehaviour { let ev = match res { Ok(ok) => DiscoveryOut::ValuePut(ok.key), Err(e) => { + warn!(target: "sub-libp2p", + "Libp2p => Failed to put record: {:?}", e); DiscoveryOut::ValuePutFailed(e.into_key()) } }; diff --git a/client/network/src/error.rs b/client/network/src/error.rs index ba5d5c2d0d2b5d3dde2329281e3e02d77edd961e..158e75fcf1d721551a3f8cfb51f4a3d8b93b73f4 100644 --- a/client/network/src/error.rs +++ b/client/network/src/error.rs @@ -45,6 +45,8 @@ pub enum Error { /// The second peer id that was found for the bootnode. second_id: PeerId, }, + /// Prometheus metrics error. + Prometheus(prometheus_endpoint::PrometheusError) } // Make `Debug` use the `Display` implementation. @@ -60,6 +62,7 @@ impl std::error::Error for Error { Error::Io(ref err) => Some(err), Error::Client(ref err) => Some(err), Error::DuplicateBootnode { .. } => None, + Error::Prometheus(ref err) => Some(err), } } } diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index dd156360330acfa73f8115e027fc31910034ed81..a5397a4e3e6263f2c6c33263b7002549cedcacae 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -78,12 +78,8 @@ //! - DNS for addresses of the form `/dns4/example.com/tcp/5` or `/dns4/example.com/tcp/5/ws`. A //! node's address can contain a domain name. //! -//! The following encryption protocols are supported: -//! -//! - [Secio](https://github.com/libp2p/specs/tree/master/secio). A TLS-1.2-like protocol but -//! without certificates. Support for secio will likely be deprecated in the far future. -//! - [Noise](https://noiseprotocol.org/). Support for noise is very experimental. The details are -//! very blurry and may change at any moment. +//! On top of the base-layer protocol, the [Noise](https://noiseprotocol.org/) protocol is +//! negotiated and applied. The exact handshake protocol is experimental and is subject to change. //! //! The following multiplexing protocols are supported: //! @@ -140,10 +136,6 @@ //! - Light-client requests. When a light client requires information, a random node we have a //! substream open with is chosen, and the information is requested from it. //! - Gossiping. Used for example by grandpa. -//! - Network specialization. The network protocol can be specialized through a template parameter -//! of the network service. This specialization is free to send and receive messages with the -//! remote. This is meant to be used by the chain that is being built on top of Substrate -//! (eg. Polkadot). //! //! It is intended that in the future each of these components gets more isolated, so that they //! are free to open and close their own substreams, and so that syncing and light client requests @@ -160,7 +152,8 @@ //! //! After the `NetworkWorker` has been created, the important things to do are: //! -//! - Calling `NetworkWorker::poll` in order to advance the network. +//! - Calling `NetworkWorker::poll` in order to advance the network. This can be done by +//! dispatching a background task with the [`NetworkWorker`]. //! - Calling `on_block_import` whenever a block is added to the client. //! - Calling `on_block_finalized` whenever a block is finalized. //! 
- Calling `trigger_repropagate` when a transaction is added to the pool. @@ -180,34 +173,27 @@ mod utils; pub mod config; pub mod error; +pub mod network_state; -pub use chain::{Client as ClientHandle, FinalityProofProvider}; -pub use service::{ - NetworkService, NetworkWorker, TransactionPool, ExHashT, ReportHandle, - NetworkStateInfo, -}; -pub use protocol::{PeerInfo, Context, ProtocolConfig, message, specialization}; +pub use service::{NetworkService, NetworkStateInfo, NetworkWorker, ExHashT, ReportHandle}; +pub use protocol::PeerInfo; pub use protocol::event::{Event, DhtEvent}; pub use protocol::sync::SyncState; pub use libp2p::{Multiaddr, PeerId}; #[doc(inline)] pub use libp2p::multiaddr; -pub use message::{generic as generic_message, RequestId, Status as StatusMessage}; -pub use on_demand_layer::{OnDemand, RemoteResponse}; -pub use sc_peerset::ReputationChange; - -// Used by the `construct_simple_protocol!` macro. +// Note: these re-exports shouldn't be part of the public API of the crate and will be removed in +// the future. +#[doc(hidden)] +pub use protocol::message; #[doc(hidden)] -pub use sp_runtime::traits::Block as BlockT; +pub use protocol::message::Status as StatusMessage; -use libp2p::core::ConnectedPoint; -use serde::{Deserialize, Serialize}; -use slog_derive::SerdeValue; -use std::{collections::{HashMap, HashSet}, time::Duration}; +pub use sc_peerset::ReputationChange; /// Extension trait for `NetworkBehaviour` that also accepts discovering nodes. -pub trait DiscoveryNetBehaviour { +trait DiscoveryNetBehaviour { /// Notify the protocol that we have learned about the existence of nodes. /// /// Can (or most likely will) be called multiple times with the same `PeerId`s. @@ -216,90 +202,3 @@ pub trait DiscoveryNetBehaviour { /// system, or remove nodes that will fail to reach. fn add_discovered_nodes(&mut self, nodes: impl Iterator); } - -/// Returns general information about the networking. -/// -/// Meant for general diagnostic purposes. -/// -/// **Warning**: This API is not stable. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, SerdeValue)] -#[serde(rename_all = "camelCase")] -pub struct NetworkState { - /// PeerId of the local node. - pub peer_id: String, - /// List of addresses the node is currently listening on. - pub listened_addresses: HashSet, - /// List of addresses the node knows it can be reached as. - pub external_addresses: HashSet, - /// List of node we're connected to. - pub connected_peers: HashMap, - /// List of node that we know of but that we're not connected to. - pub not_connected_peers: HashMap, - /// Downloaded bytes per second averaged over the past few seconds. - pub average_download_per_sec: u64, - /// Uploaded bytes per second averaged over the past few seconds. - pub average_upload_per_sec: u64, - /// State of the peerset manager. - pub peerset: serde_json::Value, -} - -/// Part of the `NetworkState` struct. Unstable. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct NetworkStatePeer { - /// How we are connected to the node. - pub endpoint: NetworkStatePeerEndpoint, - /// Node information, as provided by the node itself. Can be empty if not known yet. - pub version_string: Option, - /// Latest ping duration with this node. - pub latest_ping_time: Option, - /// If true, the peer is "enabled", which means that we try to open Substrate-related protocols - /// with this peer. If false, we stick to Kademlia and/or other network-only protocols. 
- pub enabled: bool, - /// If true, the peer is "open", which means that we have a Substrate-related protocol - /// with this peer. - pub open: bool, - /// List of addresses known for this node. - pub known_addresses: HashSet, -} - -/// Part of the `NetworkState` struct. Unstable. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct NetworkStateNotConnectedPeer { - /// List of addresses known for this node. - pub known_addresses: HashSet, - /// Node information, as provided by the node itself, if we were ever connected to this node. - pub version_string: Option, - /// Latest ping duration with this node, if we were ever connected to this node. - pub latest_ping_time: Option, -} - -/// Part of the `NetworkState` struct. Unstable. -#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum NetworkStatePeerEndpoint { - /// We are dialing the given address. - Dialing(Multiaddr), - /// We are listening. - Listening { - /// Local address of the connection. - local_addr: Multiaddr, - /// Address data is sent back to. - send_back_addr: Multiaddr, - }, -} - -impl From for NetworkStatePeerEndpoint { - fn from(endpoint: ConnectedPoint) -> Self { - match endpoint { - ConnectedPoint::Dialer { address } => - NetworkStatePeerEndpoint::Dialing(address), - ConnectedPoint::Listener { local_addr, send_back_addr } => - NetworkStatePeerEndpoint::Listening { - local_addr, - send_back_addr - } - } - } -} diff --git a/client/network/src/network_state.rs b/client/network/src/network_state.rs new file mode 100644 index 0000000000000000000000000000000000000000..00d53976ae8fc8960e9045b1dbab7d1621429f26 --- /dev/null +++ b/client/network/src/network_state.rs @@ -0,0 +1,111 @@ +// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Information about the networking, for diagnostic purposes. +//! +//! **Warning**: These APIs are not stable. + +use libp2p::{core::ConnectedPoint, Multiaddr}; +use serde::{Deserialize, Serialize}; +use slog_derive::SerdeValue; +use std::{collections::{HashMap, HashSet}, time::Duration}; + +/// Returns general information about the networking. +/// +/// Meant for general diagnostic purposes. +/// +/// **Warning**: This API is not stable. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize, SerdeValue)] +#[serde(rename_all = "camelCase")] +pub struct NetworkState { + /// PeerId of the local node. + pub peer_id: String, + /// List of addresses the node is currently listening on. + pub listened_addresses: HashSet, + /// List of addresses the node knows it can be reached as. + pub external_addresses: HashSet, + /// List of node we're connected to. + pub connected_peers: HashMap, + /// List of node that we know of but that we're not connected to. 
+ pub not_connected_peers: HashMap, + /// Downloaded bytes per second averaged over the past few seconds. + pub average_download_per_sec: u64, + /// Uploaded bytes per second averaged over the past few seconds. + pub average_upload_per_sec: u64, + /// State of the peerset manager. + pub peerset: serde_json::Value, +} + +/// Part of the `NetworkState` struct. Unstable. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct Peer { + /// How we are connected to the node. + pub endpoint: PeerEndpoint, + /// Node information, as provided by the node itself. Can be empty if not known yet. + pub version_string: Option, + /// Latest ping duration with this node. + pub latest_ping_time: Option, + /// If true, the peer is "enabled", which means that we try to open Substrate-related protocols + /// with this peer. If false, we stick to Kademlia and/or other network-only protocols. + pub enabled: bool, + /// If true, the peer is "open", which means that we have a Substrate-related protocol + /// with this peer. + pub open: bool, + /// List of addresses known for this node. + pub known_addresses: HashSet, +} + +/// Part of the `NetworkState` struct. Unstable. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct NotConnectedPeer { + /// List of addresses known for this node. + pub known_addresses: HashSet, + /// Node information, as provided by the node itself, if we were ever connected to this node. + pub version_string: Option, + /// Latest ping duration with this node, if we were ever connected to this node. + pub latest_ping_time: Option, +} + +/// Part of the `NetworkState` struct. Unstable. +#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub enum PeerEndpoint { + /// We are dialing the given address. + Dialing(Multiaddr), + /// We are listening. + Listening { + /// Local address of the connection. + local_addr: Multiaddr, + /// Address data is sent back to. + send_back_addr: Multiaddr, + }, +} + +impl From for PeerEndpoint { + fn from(endpoint: ConnectedPoint) -> Self { + match endpoint { + ConnectedPoint::Dialer { address } => + PeerEndpoint::Dialing(address), + ConnectedPoint::Listener { local_addr, send_back_addr } => + PeerEndpoint::Listening { + local_addr, + send_back_addr + } + } + } +} diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index d5e7ae6252ca147e59da50be6c22b8acb7d772f0..17208aab50fe06d171fab48fadf75c3c487ec711 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -15,10 +15,10 @@ // along with Substrate. If not, see . 
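[Editor's note] The diagnostic types moved into `network_state.rs` keep their `#[serde(rename_all = "camelCase")]` derives, so field names come out camelCased in the JSON handed to diagnostics consumers. A minimal sketch of that behaviour on a simplified stand-in struct (not the real `Peer` type), assuming serde with the derive feature and serde_json, both already dependencies here:

```rust
use serde::Serialize;

// Simplified stand-in for the diagnostic structs in `network_state.rs`.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
struct PeerSummary {
    version_string: Option<String>,
    latest_ping_time_ms: u64,
}

fn main() {
    let peer = PeerSummary {
        version_string: Some("node/1.0".into()),
        latest_ping_time_ms: 42,
    };
    // Fields are renamed to camelCase, matching the wire format of `NetworkState`.
    let json = serde_json::to_string(&peer).unwrap();
    assert_eq!(json, r#"{"versionString":"node/1.0","latestPingTimeMs":42}"#);
}
```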
use crate::{DiscoveryNetBehaviour, config::ProtocolId}; -use legacy_proto::{LegacyProto, LegacyProtoOut}; use crate::utils::interval; use bytes::{Bytes, BytesMut}; use futures::prelude::*; +use generic_proto::{GenericProto, GenericProtoOut}; use libp2p::{Multiaddr, PeerId}; use libp2p::core::{ConnectedPoint, nodes::listeners::ListenerId}; use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; @@ -36,13 +36,14 @@ use sp_runtime::traits::{ }; use sp_arithmetic::traits::SaturatedConversion; use message::{BlockAnnounce, BlockAttributes, Direction, FromBlock, Message, RequestId}; -use message::generic::{Message as GenericMessage, ConsensusMessage}; +use message::generic::Message as GenericMessage; use light_dispatch::{LightDispatch, LightDispatchNetwork, RequestData}; -use specialization::NetworkSpecialization; +use prometheus_endpoint::{Registry, Gauge, register, PrometheusError, U64}; use sync::{ChainSync, SyncState}; use crate::service::{TransactionPool, ExHashT}; use crate::config::{BoxFinalityProofRequestBuilder, Roles}; use rustc_hex::ToHex; +use std::borrow::Cow; use std::collections::{BTreeMap, HashMap, HashSet}; use std::sync::Arc; use std::fmt::Write; @@ -64,7 +65,7 @@ pub mod api { } } -mod legacy_proto; +mod generic_proto; mod util; pub mod block_requests; @@ -72,7 +73,6 @@ pub mod message; pub mod event; pub mod light_client_handler; pub mod light_dispatch; -pub mod specialization; pub mod sync; pub use block_requests::BlockRequests; @@ -132,10 +132,111 @@ mod rep { pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); /// Peer role does not match (e.g. light peer connecting to another light peer). pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); + /// Peer response data does not have requested bits. + pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); +} + +struct Metrics { + handshaking_peers: Gauge, + obsolete_requests: Gauge, + peers: Gauge, + queued_blocks: Gauge, + fork_targets: Gauge, + finality_proofs_pending: Gauge, + finality_proofs_active: Gauge, + finality_proofs_failed: Gauge, + finality_proofs_importing: Gauge, + justifications_pending: Gauge, + justifications_active: Gauge, + justifications_failed: Gauge, + justifications_importing: Gauge +} + +impl Metrics { + fn register(r: &Registry) -> Result { + Ok(Metrics { + handshaking_peers: { + let g = Gauge::new("sync_handshaking_peers", "number of newly connected peers")?; + register(g, r)? + }, + obsolete_requests: { + let g = Gauge::new("sync_obsolete_requests", "total number of obsolete requests")?; + register(g, r)? + }, + peers: { + let g = Gauge::new("sync_peers", "number of peers we sync with")?; + register(g, r)? + }, + queued_blocks: { + let g = Gauge::new("sync_queued_blocks", "number of blocks in import queue")?; + register(g, r)? + }, + fork_targets: { + let g = Gauge::new("sync_fork_targets", "fork sync targets")?; + register(g, r)? + }, + justifications_pending: { + let g = Gauge::new( + "sync_extra_justifications_pending", + "number of pending extra justifications requests" + )?; + register(g, r)? + }, + justifications_active: { + let g = Gauge::new( + "sync_extra_justifications_active", + "number of active extra justifications requests" + )?; + register(g, r)? + }, + justifications_failed: { + let g = Gauge::new( + "sync_extra_justifications_failed", + "number of failed extra justifications requests" + )?; + register(g, r)? 
+ }, + justifications_importing: { + let g = Gauge::new( + "sync_extra_justifications_importing", + "number of importing extra justifications requests" + )?; + register(g, r)? + }, + finality_proofs_pending: { + let g = Gauge::new( + "sync_extra_finality_proofs_pending", + "number of pending extra finality proof requests" + )?; + register(g, r)? + }, + finality_proofs_active: { + let g = Gauge::new( + "sync_extra_finality_proofs_active", + "number of active extra finality proof requests" + )?; + register(g, r)? + }, + finality_proofs_failed: { + let g = Gauge::new( + "sync_extra_finality_proofs_failed", + "number of failed extra finality proof requests" + )?; + register(g, r)? + }, + finality_proofs_importing: { + let g = Gauge::new( + "sync_extra_finality_proofs_importing", + "number of importing extra finality proof requests" + )?; + register(g, r)? + }, + }) + } } // Lock must always be taken in order declared here. -pub struct Protocol, H: ExHashT> { +pub struct Protocol { /// Interval at which we call `tick`. tick_timeout: Pin + Send>>, /// Interval at which we call `propagate_extrinsics`. @@ -145,7 +246,6 @@ pub struct Protocol, H: ExHashT> { light_dispatch: LightDispatch, genesis_hash: B::Hash, sync: ChainSync, - specialization: S, context_data: ContextData, /// List of nodes for which we perform additional logging because they are important for the /// user. @@ -158,9 +258,13 @@ pub struct Protocol, H: ExHashT> { /// When asked for a proof of finality, we use this struct to build one. finality_proof_provider: Option>>, /// Handles opening the unique substream and sending and receiving raw messages. - behaviour: LegacyProto, - /// List of notification protocols that have been registered. - registered_notif_protocols: HashSet, + behaviour: GenericProto, + /// For each legacy gossiping engine ID, the corresponding new protocol name. + protocol_name_by_engine: HashMap>, + /// For each protocol name, the legacy gossiping engine ID. + protocol_engine_by_name: HashMap, ConsensusEngineId>, + /// Prometheus metrics. + metrics: Option, } #[derive(Default)] @@ -207,7 +311,7 @@ pub struct PeerInfo { } struct LightDispatchIn<'a> { - behaviour: &'a mut LegacyProto, + behaviour: &'a mut GenericProto, peerset: sc_peerset::PeersetHandle, } @@ -332,55 +436,6 @@ impl<'a, B: BlockT> LightDispatchNetwork for LightDispatchIn<'a> { } } -/// Context for a network-specific handler. -pub trait Context { - /// Adjusts the reputation of the peer. Use this to point out that a peer has been malign or - /// irresponsible or appeared lazy. - fn report_peer(&mut self, who: PeerId, reputation: sc_peerset::ReputationChange); - - /// Force disconnecting from a peer. Use this when a peer misbehaved. - fn disconnect_peer(&mut self, who: PeerId); - - /// Send a chain-specific message to a peer. - fn send_chain_specific(&mut self, who: PeerId, message: Vec); -} - -/// Protocol context. 
-struct ProtocolContext<'a, B: 'a + BlockT, H: 'a + ExHashT> { - behaviour: &'a mut LegacyProto, - context_data: &'a mut ContextData, - peerset_handle: &'a sc_peerset::PeersetHandle, -} - -impl<'a, B: BlockT + 'a, H: 'a + ExHashT> ProtocolContext<'a, B, H> { - fn new( - context_data: &'a mut ContextData, - behaviour: &'a mut LegacyProto, - peerset_handle: &'a sc_peerset::PeersetHandle, - ) -> Self { - ProtocolContext { context_data, peerset_handle, behaviour } - } -} - -impl<'a, B: BlockT + 'a, H: ExHashT + 'a> Context for ProtocolContext<'a, B, H> { - fn report_peer(&mut self, who: PeerId, reputation: sc_peerset::ReputationChange) { - self.peerset_handle.report_peer(who, reputation) - } - - fn disconnect_peer(&mut self, who: PeerId) { - self.behaviour.disconnect_peer(&who) - } - - fn send_chain_specific(&mut self, who: PeerId, message: Vec) { - send_message:: ( - self.behaviour, - &mut self.context_data.stats, - &who, - GenericMessage::ChainSpecific(message) - ) - } -} - /// Data necessary to create a context. struct ContextData { // All connected peers @@ -407,20 +462,20 @@ impl Default for ProtocolConfig { } } -impl, H: ExHashT> Protocol { +impl Protocol { /// Create a new instance. pub fn new( config: ProtocolConfig, chain: Arc>, checker: Arc>, - specialization: S, transaction_pool: Arc>, finality_proof_provider: Option>>, finality_proof_request_builder: Option>, protocol_id: ProtocolId, peerset_config: sc_peerset::PeersetConfig, - block_announce_validator: Box + Send> - ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { + block_announce_validator: Box + Send>, + metrics_registry: Option<&Registry> + ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { let info = chain.info(); let sync = ChainSync::new( config.roles, @@ -442,7 +497,7 @@ impl, H: ExHashT> Protocol { let (peerset, peerset_handle) = sc_peerset::Peerset::from_config(peerset_config); let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); - let behaviour = LegacyProto::new(protocol_id, versions, peerset); + let behaviour = GenericProto::new(protocol_id, versions, peerset); let protocol = Protocol { tick_timeout: Box::pin(interval(TICK_TIMEOUT)), @@ -456,14 +511,19 @@ impl, H: ExHashT> Protocol { light_dispatch: LightDispatch::new(checker), genesis_hash: info.genesis_hash, sync, - specialization, handshaking_peers: HashMap::new(), important_peers, transaction_pool, finality_proof_provider, peerset_handle: peerset_handle.clone(), behaviour, - registered_notif_protocols: HashSet::new(), + protocol_name_by_engine: HashMap::new(), + protocol_engine_by_name: HashMap::new(), + metrics: if let Some(r) = metrics_registry { + Some(Metrics::register(r)?) + } else { + None + } }; Ok((protocol, peerset_handle)) @@ -479,6 +539,16 @@ impl, H: ExHashT> Protocol { self.behaviour.is_open(peer_id) } + /// Returns the list of all the peers that the peerset currently requests us to be connected to. + pub fn requested_peers(&self) -> impl Iterator { + self.behaviour.requested_peers() + } + + /// Returns the number of discovered nodes that we keep in memory. + pub fn num_discovered_peers(&self) -> usize { + self.behaviour.num_discovered_peers() + } + /// Disconnects the given peer if we are connected to it. 
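[Editor's note] Each sync gauge above is created with `Gauge::new(name, help)` and attached to the node's `Registry` via `register`, which hands the metric back for later use. A sketch of that pattern for a single gauge, assuming the same `substrate-prometheus-endpoint` helpers this patch imports (`Registry`, `Gauge`, `register`, `PrometheusError`, `U64`); the gauge name here is illustrative:

```rust
use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64};

/// Create and register a single u64 gauge, in the same style as
/// `Metrics::register` in protocol.rs.
fn register_peers_gauge(registry: &Registry) -> Result<Gauge<U64>, PrometheusError> {
    let gauge = Gauge::new("example_sync_peers", "number of peers we sync with")?;
    register(gauge, registry)
}

fn main() -> Result<(), PrometheusError> {
    // Assumes `Registry::new()` is available via the crate's re-export of the
    // underlying prometheus registry type.
    let registry = Registry::new();
    let peers = register_peers_gauge(&registry)?;
    // On each report cycle the gauge is simply `set` to the current value,
    // as `report_metrics` does below.
    peers.set(3);
    Ok(())
}
```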
pub fn disconnect_peer(&mut self, peer_id: &PeerId) { self.behaviour.disconnect_peer(peer_id) @@ -646,7 +716,7 @@ impl, H: ExHashT> Protocol { GenericMessage::RemoteReadChildRequest(request) => self.on_remote_read_child_request(who, request), GenericMessage::Consensus(msg) => - return if self.registered_notif_protocols.contains(&msg.engine_id) { + return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { CustomMessageOutcome::NotificationsReceived { remote: who.clone(), messages: vec![(msg.engine_id, From::from(msg.data))], @@ -659,7 +729,7 @@ impl, H: ExHashT> Protocol { let messages = messages .into_iter() .filter_map(|msg| { - if self.registered_notif_protocols.contains(&msg.engine_id) { + if self.protocol_name_by_engine.contains_key(&msg.engine_id) { Some((msg.engine_id, From::from(msg.data))) } else { warn!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); @@ -677,11 +747,6 @@ impl, H: ExHashT> Protocol { CustomMessageOutcome::None }; }, - GenericMessage::ChainSpecific(msg) => self.specialization.on_message( - &mut ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle), - who, - msg, - ), } CustomMessageOutcome::None @@ -706,14 +771,6 @@ impl, H: ExHashT> Protocol { ); } - /// Locks `self` and returns a context plus the network specialization. - pub fn specialization_lock<'a>( - &'a mut self, - ) -> (impl Context + 'a, &'a mut S) { - let context = ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle); - (context, &mut self.specialization) - } - /// Called when a new peer is connected pub fn on_peer_connected(&mut self, who: PeerId) { trace!(target: "sync", "Connecting {}", who); @@ -735,9 +792,7 @@ impl, H: ExHashT> Protocol { self.context_data.peers.remove(&peer) }; if let Some(_peer_data) = removed { - let mut context = ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle); self.sync.peer_disconnected(peer.clone()); - self.specialization.on_disconnect(&mut context, peer.clone()); self.light_dispatch.on_disconnect(LightDispatchIn { behaviour: &mut self.behaviour, peerset: self.peerset_handle.clone(), @@ -766,12 +821,14 @@ impl, H: ExHashT> Protocol { peer: PeerId, request: message::BlockRequest ) { - trace!(target: "sync", "BlockRequest {} from {}: from {:?} to {:?} max {:?}", + trace!(target: "sync", "BlockRequest {} from {}: from {:?} to {:?} max {:?} for {:?}", request.id, peer, request.from, request.to, - request.max); + request.max, + request.fields, + ); // sending block requests to the node that is unable to serve it is considered a bad behavior if !self.config.roles.is_full() { @@ -819,6 +876,11 @@ impl, H: ExHashT> Protocol { message_queue: None, justification, }; + // Stop if we don't have requested block body + if get_body && block_data.body.is_none() { + trace!(target: "sync", "Missing data for block request."); + break; + } blocks.push(block_data); match request.direction { message::Direction::Ascending => id = BlockId::Number(number + One::one()), @@ -849,7 +911,7 @@ impl, H: ExHashT> Protocol { request: message::BlockRequest, response: message::BlockResponse, ) -> CustomMessageOutcome { - let blocks_range = match ( + let blocks_range = || match ( response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), ) { @@ -861,7 +923,7 @@ impl, H: ExHashT> Protocol { response.id, peer, response.blocks.len(), - blocks_range + blocks_range(), 
); if request.fields == message::BlockAttributes::JUSTIFICATION { @@ -876,6 +938,20 @@ impl, H: ExHashT> Protocol { } } } else { + // Validate fields against the request. + if request.fields.contains(message::BlockAttributes::HEADER) && response.blocks.iter().any(|b| b.header.is_none()) { + self.behaviour.disconnect_peer(&peer); + self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); + trace!(target: "sync", "Missing header for a block"); + return CustomMessageOutcome::None + } + if request.fields.contains(message::BlockAttributes::BODY) && response.blocks.iter().any(|b| b.body.is_none()) { + self.behaviour.disconnect_peer(&peer); + self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); + trace!(target: "sync", "Missing body for a block"); + return CustomMessageOutcome::None + } + match self.sync.on_block_data(peer, Some(request), response) { Ok(sync::OnBlockData::Import(origin, blocks)) => CustomMessageOutcome::BlockImport(origin, blocks), @@ -901,6 +977,7 @@ impl, H: ExHashT> Protocol { behaviour: &mut self.behaviour, peerset: self.peerset_handle.clone(), }); + self.report_metrics() } fn maintain_peers(&mut self) { @@ -936,9 +1013,6 @@ impl, H: ExHashT> Protocol { } } - self.specialization.maintain_peers( - &mut ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle) - ); for p in aborting { self.behaviour.disconnect_peer(&p); self.peerset_handle.report_peer(p, rep::TIMEOUT); @@ -1054,13 +1128,10 @@ impl, H: ExHashT> Protocol { } } - let mut context = ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle); - self.specialization.on_connect(&mut context, who.clone(), status); - // Notify all the notification protocols as open. CustomMessageOutcome::NotificationStreamOpened { remote: who, - protocols: self.registered_notif_protocols.iter().cloned().collect(), + protocols: self.protocol_name_by_engine.keys().cloned().collect(), roles: info.roles, } } @@ -1075,18 +1146,15 @@ impl, H: ExHashT> Protocol { engine_id: ConsensusEngineId, message: impl Into> ) { - if !self.registered_notif_protocols.contains(&engine_id) { + if let Some(protocol_name) = self.protocol_name_by_engine.get(&engine_id) { + self.behaviour.write_notification(&target, engine_id, protocol_name.clone(), message); + } else { error!( target: "sub-libp2p", "Sending a notification with a protocol that wasn't registered: {:?}", engine_id ); } - - self.send_message(&target, GenericMessage::Consensus(ConsensusMessage { - engine_id, - data: message.into(), - })); } /// Registers a new notifications protocol. 
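Editor's note: the validation added above rejects a block response whose blocks are missing a header or body that the request explicitly asked for, disconnecting the peer and reporting `rep::BAD_RESPONSE`. A simplified, self-contained sketch of that rule (stand-in types, not the real `message::BlockAttributes` or `BlockData`):

// Simplified stand-ins for the requested attributes and the per-block payload.
#[derive(Clone, Copy)]
struct RequestedFields {
    header: bool,
    body: bool,
}

struct BlockData {
    header: Option<Vec<u8>>,
    body: Option<Vec<u8>>,
}

// Mirrors the check above: a response is only acceptable if every block carries every
// field that the request asked for.
fn response_matches_request(requested: RequestedFields, blocks: &[BlockData]) -> bool {
    let missing_header = requested.header && blocks.iter().any(|b| b.header.is_none());
    let missing_body = requested.body && blocks.iter().any(|b| b.body.is_none());
    !missing_header && !missing_body
}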
@@ -1096,9 +1164,14 @@ impl, H: ExHashT> Protocol { pub fn register_notifications_protocol( &mut self, engine_id: ConsensusEngineId, + protocol_name: impl Into>, ) -> Vec { - if !self.registered_notif_protocols.insert(engine_id) { - error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", engine_id); + let protocol_name = protocol_name.into(); + if self.protocol_name_by_engine.insert(engine_id, protocol_name.clone()).is_some() { + error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol_name); + } else { + self.behaviour.register_notif_protocol(protocol_name.clone(), engine_id, Vec::new()); + self.protocol_engine_by_name.insert(protocol_name, engine_id); } // Registering a protocol while we already have open connections isn't great, but for now @@ -1286,7 +1359,7 @@ impl, H: ExHashT> Protocol { roles: self.config.roles.into(), best_number: info.best_number, best_hash: info.best_hash, - chain_status: self.specialization.status(), + chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible }; self.send_message(&who, GenericMessage::Status(status)) @@ -1355,15 +1428,10 @@ impl, H: ExHashT> Protocol { /// Call this when a block has been imported in the import queue and we should announce it on /// the network. - pub fn on_block_imported(&mut self, hash: B::Hash, header: &B::Header, data: Vec, is_best: bool) { + pub fn on_block_imported(&mut self, header: &B::Header, data: Vec, is_best: bool) { if is_best { self.sync.update_chain_info(header); } - self.specialization.on_block_imported( - &mut ProtocolContext::new(&mut self.context_data, &mut self.behaviour, &self.peerset_handle), - hash.clone(), - header, - ); // blocks are not announced by light clients if self.config.roles.is_light() { @@ -1524,7 +1592,10 @@ impl, H: ExHashT> Protocol { trace!(target: "sync", "Remote read request {} from {} ({} at {})", request.id, who, keys_str(), request.block); - let proof = match self.context_data.chain.read_proof(&request.block, &request.keys) { + let proof = match self.context_data.chain.read_proof( + &request.block, + &mut request.keys.iter().map(AsRef::as_ref) + ) { Ok(proof) => proof, Err(error) => { trace!(target: "sync", "Remote read request {} from {} ({} at {}) failed with: {}", @@ -1574,7 +1645,7 @@ impl, H: ExHashT> Protocol { &request.block, &request.storage_key, child_info, - &request.keys, + &mut request.keys.iter().map(AsRef::as_ref), ) { Ok(proof) => proof, Err(error) => { @@ -1815,6 +1886,40 @@ impl, H: ExHashT> Protocol { } out } + + fn report_metrics(&self) { + use std::convert::TryInto; + + if let Some(metrics) = &self.metrics { + let mut obsolete_requests: u64 = 0; + for peer in self.context_data.peers.values() { + let n = peer.obsolete_requests.len().try_into().unwrap_or(std::u64::MAX); + obsolete_requests = obsolete_requests.saturating_add(n); + } + metrics.obsolete_requests.set(obsolete_requests); + + let n = self.handshaking_peers.len().try_into().unwrap_or(std::u64::MAX); + metrics.handshaking_peers.set(n); + + let n = self.context_data.peers.len().try_into().unwrap_or(std::u64::MAX); + metrics.peers.set(n); + + let m = self.sync.metrics(); + + metrics.fork_targets.set(m.fork_targets.into()); + metrics.queued_blocks.set(m.queued_blocks.into()); + + metrics.justifications_pending.set(m.justifications.pending_requests.into()); + metrics.justifications_active.set(m.justifications.active_requests.into()); + metrics.justifications_failed.set(m.justifications.failed_requests.into()); + 
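Editor's note: the `report_metrics` helper that begins just above accumulates per-peer counts with `try_into().unwrap_or(std::u64::MAX)` followed by `saturating_add`, so an extreme count saturates the gauge instead of panicking or wrapping. A self-contained sketch of that accumulation pattern:

use std::convert::TryInto;

// Sum a list of `usize` lengths into a `u64` gauge value, saturating on overflow, in the
// same way as the `obsolete_requests` accumulation in `report_metrics`.
fn saturating_total(lengths: &[usize]) -> u64 {
    lengths.iter().fold(0u64, |acc, &len| {
        let len: u64 = len.try_into().unwrap_or(u64::MAX);
        acc.saturating_add(len)
    })
}

fn main() {
    assert_eq!(saturating_total(&[1, 2, 3]), 6);
}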
metrics.justifications_importing.set(m.justifications.importing_requests.into()); + + metrics.finality_proofs_pending.set(m.finality_proofs.pending_requests.into()); + metrics.finality_proofs_active.set(m.finality_proofs.active_requests.into()); + metrics.finality_proofs_failed.set(m.finality_proofs.failed_requests.into()); + metrics.finality_proofs_importing.set(m.finality_proofs.importing_requests.into()); + } + } } /// Outcome of an incoming custom message. @@ -1833,7 +1938,7 @@ pub enum CustomMessageOutcome { } fn send_request( - behaviour: &mut LegacyProto, + behaviour: &mut GenericProto, stats: &mut HashMap<&'static str, PacketStats>, peers: &mut HashMap>, who: &PeerId, @@ -1854,7 +1959,7 @@ fn send_request( } fn send_message( - behaviour: &mut LegacyProto, + behaviour: &mut GenericProto, stats: &mut HashMap<&'static str, PacketStats>, who: &PeerId, message: Message, @@ -1866,9 +1971,8 @@ fn send_message( behaviour.send_packet(who, encoded); } -impl, H: ExHashT> NetworkBehaviour for -Protocol { - type ProtocolsHandler = ::ProtocolsHandler; +impl NetworkBehaviour for Protocol { + type ProtocolsHandler = ::ProtocolsHandler; type OutEvent = CustomMessageOutcome; fn new_handler(&mut self) -> Self::ProtocolsHandler { @@ -1954,25 +2058,21 @@ Protocol { }; let outcome = match event { - LegacyProtoOut::CustomProtocolOpen { peer_id, version, .. } => { - debug_assert!( - version <= CURRENT_VERSION as u8 - && version >= MIN_VERSION as u8 - ); + GenericProtoOut::CustomProtocolOpen { peer_id, .. } => { self.on_peer_connected(peer_id.clone()); CustomMessageOutcome::None } - LegacyProtoOut::CustomProtocolClosed { peer_id, .. } => { + GenericProtoOut::CustomProtocolClosed { peer_id, .. } => { self.on_peer_disconnected(peer_id.clone()); // Notify all the notification protocols as closed. 
CustomMessageOutcome::NotificationStreamClosed { remote: peer_id, - protocols: self.registered_notif_protocols.iter().cloned().collect(), + protocols: self.protocol_name_by_engine.keys().cloned().collect(), } }, - LegacyProtoOut::CustomMessage { peer_id, message } => + GenericProtoOut::CustomMessage { peer_id, message } => self.on_custom_message(peer_id, message), - LegacyProtoOut::Clogged { peer_id, messages } => { + GenericProtoOut::Clogged { peer_id, messages } => { debug!(target: "sync", "{} clogging messages:", messages.len()); for msg in messages.into_iter().take(5) { let message: Option> = Decode::decode(&mut &msg[..]).ok(); @@ -2028,13 +2128,13 @@ Protocol { } } -impl, H: ExHashT> DiscoveryNetBehaviour for Protocol { +impl DiscoveryNetBehaviour for Protocol { fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { self.behaviour.add_discovered_nodes(peer_ids) } } -impl, H: ExHashT> Drop for Protocol { +impl Drop for Protocol { fn drop(&mut self) { debug!(target: "sync", "Network stats:\n{}", self.format_stats()); } diff --git a/client/network/src/protocol/block_requests.rs b/client/network/src/protocol/block_requests.rs index ef970657c5ff9477bc939b6dd98963b4e41387e9..20a99378b16c992436ea2a4ee686fa0fc684d7f3 100644 --- a/client/network/src/protocol/block_requests.rs +++ b/client/network/src/protocol/block_requests.rs @@ -111,7 +111,7 @@ impl Config { let mut v = Vec::new(); v.extend_from_slice(b"/"); v.extend_from_slice(id.as_bytes()); - v.extend_from_slice(b"/sync/1"); + v.extend_from_slice(b"/sync/2"); self.protocol = v.into(); self } @@ -146,8 +146,7 @@ where , request: &api::v1::BlockRequest ) -> Result { - log::trace!("block request {} from peer {}: from block {:?} to block {:?}, max blocks {:?}", - request.id, + log::trace!("block request from peer {}: from block {:?} to block {:?}, max blocks {:?}", peer, request.from_block, request.to_block, @@ -242,7 +241,7 @@ where } } - Ok(api::v1::BlockResponse { id: request.id, blocks }) + Ok(api::v1::BlockResponse { blocks }) } } @@ -274,10 +273,10 @@ where fn inject_node_event(&mut self, peer: PeerId, Request(request, mut stream): Request) { match self.on_block_request(&peer, &request) { Ok(res) => { - log::trace!("enqueueing block response {} for peer {} with {} blocks", res.id, peer, res.blocks.len()); + log::trace!("enqueueing block response for peer {} with {} blocks", peer, res.blocks.len()); let mut data = Vec::with_capacity(res.encoded_len()); if let Err(e) = res.encode(&mut data) { - log::debug!("error encoding block response {} for peer {}: {}", res.id, peer, e) + log::debug!("error encoding block response for peer {}: {}", peer, e) } else { let future = async move { if let Err(e) = write_one(&mut stream, data).await { @@ -287,7 +286,7 @@ where self.outgoing.push(future.boxed()) } } - Err(e) => log::debug!("error handling block request {} from peer {}: {}", request.id, peer, e) + Err(e) => log::debug!("error handling block request from peer {}: {}", peer, e) } } diff --git a/client/network/src/protocol/legacy_proto.rs b/client/network/src/protocol/generic_proto.rs similarity index 86% rename from client/network/src/protocol/legacy_proto.rs rename to client/network/src/protocol/generic_proto.rs index 434782f7d5065de6372fa651f1ed1ac29b0209aa..f703287f386fdcf9aebcfc8d3fa3d0c5971c7eff 100644 --- a/client/network/src/protocol/legacy_proto.rs +++ b/client/network/src/protocol/generic_proto.rs @@ -17,10 +17,10 @@ //! Implementation of libp2p's `NetworkBehaviour` trait that opens a single substream with the //! 
remote and then allows any communication with them. //! -//! The `Protocol` struct uses `LegacyProto` in order to open substreams with the rest of the +//! The `Protocol` struct uses `GenericProto` in order to open substreams with the rest of the //! network, then performs the Substrate protocol handling on top. -pub use self::behaviour::{LegacyProto, LegacyProtoOut}; +pub use self::behaviour::{GenericProto, GenericProtoOut}; mod behaviour; mod handler; diff --git a/client/network/src/protocol/legacy_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs similarity index 84% rename from client/network/src/protocol/legacy_proto/behaviour.rs rename to client/network/src/protocol/generic_proto/behaviour.rs index 69c89be9a36957f808773a76d54e8b43ea195251..727415baaf5bcd9a4b06ca847d9aeb6968f78ab1 100644 --- a/client/network/src/protocol/legacy_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -15,9 +15,12 @@ // along with Substrate. If not, see . use crate::{DiscoveryNetBehaviour, config::ProtocolId}; -use crate::protocol::legacy_proto::handler::{CustomProtoHandlerProto, CustomProtoHandlerOut, CustomProtoHandlerIn}; -use crate::protocol::legacy_proto::upgrade::RegisteredProtocol; +use crate::protocol::message::generic::{Message as GenericMessage, ConsensusMessage}; +use crate::protocol::generic_proto::handler::{NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn}; +use crate::protocol::generic_proto::upgrade::RegisteredProtocol; + use bytes::BytesMut; +use codec::Encode as _; use fnv::FnvHashMap; use futures::prelude::*; use libp2p::core::{ConnectedPoint, Multiaddr, PeerId}; @@ -25,16 +28,32 @@ use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; use log::{debug, error, trace, warn}; use rand::distributions::{Distribution as _, Uniform}; use smallvec::SmallVec; -use std::{borrow::Cow, collections::hash_map::Entry, cmp, error, mem, pin::Pin}; -use std::time::Duration; -use wasm_timer::Instant; +use sp_runtime::ConsensusEngineId; +use std::{borrow::Cow, collections::hash_map::Entry, cmp}; +use std::{error, mem, pin::Pin, str, time::Duration}; use std::task::{Context, Poll}; +use wasm_timer::Instant; /// Network behaviour that handles opening substreams for custom protocols with other nodes. /// +/// ## Legacy vs new protocol +/// +/// The `GenericProto` behaves as following: +/// +/// - Whenever a connection is established, we open a single substream (called "legay protocol" in +/// the source code). This substream name depends on the `protocol_id` and `versions` passed at +/// initialization. If the remote refuses this substream, we close the connection. +/// +/// - For each registered protocol, we also open an additional substream for this protocol. If the +/// remote refuses this substream, then it's fine. +/// +/// - Whenever we want to send a message, we can call either `send_packet` to force the legacy +/// substream, or `write_notification` to indicate a registered protocol. If the registered +/// protocol was refused or isn't supported by the remote, we always use the legacy instead. +/// /// ## How it works /// -/// The role of the `LegacyProto` is to synchronize the following components: +/// The role of the `GenericProto` is to synchronize the following components: /// /// - The libp2p swarm that opens new connections and reports disconnects. /// - The connection handler (see `handler.rs`) that handles individual connections. 
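Editor's note: the module documentation above explains that when a registered notification protocol was refused by the remote (or is simply not supported), messages fall back to the legacy substream. Elsewhere in this diff that fallback is realised by wrapping the payload in the legacy `ConsensusMessage` envelope and SCALE-encoding it. A simplified sketch of that wrapping, using `parity-scale-codec` as the diff does but with a stand-in struct rather than the real `GenericMessage` enum:

use codec::Encode;

// Stand-in for the legacy consensus envelope; the real code encodes a full
// `GenericMessage::Consensus(ConsensusMessage { .. })`, which also adds the enum variant tag.
#[derive(Encode)]
struct ConsensusMessage {
    engine_id: [u8; 4],
    data: Vec<u8>,
}

// Fall back to the legacy substream: bundle the notification payload with its engine id,
// then SCALE-encode the envelope so peers speaking only the legacy protocol can decode it.
fn wrap_for_legacy(engine_id: [u8; 4], payload: Vec<u8>) -> Vec<u8> {
    ConsensusMessage { engine_id, data: payload }.encode()
}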
@@ -60,9 +79,12 @@ use std::task::{Context, Poll}; /// Note that this "banning" system is not an actual ban. If a "banned" node tries to connect to /// us, we accept the connection. The "banning" system is only about delaying dialing attempts. /// -pub struct LegacyProto { - /// List of protocols to open with peers. Never modified. - protocol: RegisteredProtocol, +pub struct GenericProto { + /// Legacy protocol to open with peers. Never modified. + legacy_protocol: RegisteredProtocol, + + /// Notification protocols. Entries are only ever added and not removed. + notif_protocols: Vec<(Cow<'static, [u8]>, ConsensusEngineId, Vec)>, /// Receiver for instructions about who to connect to or disconnect from. peerset: sc_peerset::Peerset, @@ -79,7 +101,7 @@ pub struct LegacyProto { next_incoming_index: sc_peerset::IncomingIndex, /// Events to produce from `poll()`. - events: SmallVec<[NetworkBehaviourAction; 4]>, + events: SmallVec<[NetworkBehaviourAction; 4]>, } /// State of a peer we're connected to. @@ -169,6 +191,20 @@ impl PeerState { PeerState::Incoming { .. } => false, } } + + /// True if that node has been requested by the PSM. + fn is_requested(&self) -> bool { + match self { + PeerState::Poisoned => false, + PeerState::Banned { .. } => false, + PeerState::PendingRequest { .. } => true, + PeerState::Requested => true, + PeerState::Disabled { .. } => false, + PeerState::DisabledPendingEnable { .. } => true, + PeerState::Enabled { .. } => true, + PeerState::Incoming { .. } => false, + } + } } /// State of an "incoming" message sent to the peer set manager. @@ -183,13 +219,11 @@ struct IncomingPeer { incoming_id: sc_peerset::IncomingIndex, } -/// Event that can be emitted by the `LegacyProto`. +/// Event that can be emitted by the `GenericProto`. #[derive(Debug)] -pub enum LegacyProtoOut { +pub enum GenericProtoOut { /// Opened a custom protocol with the remote. CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, /// Id of the node we have opened a connection with. peer_id: PeerId, /// Endpoint used for this custom protocol. @@ -205,6 +239,8 @@ pub enum LegacyProtoOut { }, /// Receives a message on a custom protocol substream. + /// + /// Also concerns received notifications for the notifications API. CustomMessage { /// Id of the peer the message came from. peer_id: PeerId, @@ -222,17 +258,18 @@ pub enum LegacyProtoOut { }, } -impl LegacyProto { +impl GenericProto { /// Creates a `CustomProtos`. pub fn new( protocol: impl Into, versions: &[u8], peerset: sc_peerset::Peerset, ) -> Self { - let protocol = RegisteredProtocol::new(protocol, versions); + let legacy_protocol = RegisteredProtocol::new(protocol, versions); - LegacyProto { - protocol, + GenericProto { + legacy_protocol, + notif_protocols: Vec::new(), peerset, peers: FnvHashMap::default(), incoming: SmallVec::new(), @@ -241,6 +278,24 @@ impl LegacyProto { } } + /// Registers a new notifications protocol. + /// + /// You are very strongly encouraged to call this method very early on. Any open connection + /// will retain the protocols that were registered then, and not any new one. + pub fn register_notif_protocol( + &mut self, + protocol_name: impl Into>, + engine_id: ConsensusEngineId, + handshake_msg: impl Into> + ) { + self.notif_protocols.push((protocol_name.into(), engine_id, handshake_msg.into())); + } + + /// Returns the number of discovered nodes that we keep in memory. 
+ pub fn num_discovered_peers(&self) -> usize { + self.peerset.num_discovered_peers() + } + /// Returns the list of all the peers we have an open channel to. pub fn open_peers<'a>(&'a self) -> impl Iterator + 'a { self.peers.iter().filter(|(_, state)| state.is_open()).map(|(id, _)| id) @@ -292,7 +347,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); let banned_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until } @@ -313,7 +368,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); let banned_until = ban.map(|dur| Instant::now() + dur); *entry.into_mut() = PeerState::Disabled { open: false, connected_point, banned_until } @@ -324,6 +379,11 @@ impl LegacyProto { } } + /// Returns the list of all the peers that the peerset currently requests us to be connected to. + pub fn requested_peers<'a>(&'a self) -> impl Iterator + 'a { + self.peers.iter().filter(|(_, state)| state.is_requested()).map(|(id, _)| id) + } + /// Returns true if we try to open protocols with the given peer. pub fn is_enabled(&self, peer_id: &PeerId) -> bool { match self.peers.get(peer_id) { @@ -339,6 +399,44 @@ impl LegacyProto { } } + /// Sends a notification to a peer. + /// + /// Has no effect if the custom protocol is not open with the given peer. + /// + /// Also note that even if we have a valid open substream, it may in fact be already closed + /// without us knowing, in which case the packet will not be received. + /// + /// > **Note**: Ideally the `engine_id` parameter wouldn't be necessary. See the documentation + /// > of [`NotifsHandlerIn`] for more information. + pub fn write_notification( + &mut self, + target: &PeerId, + engine_id: ConsensusEngineId, + protocol_name: Cow<'static, [u8]>, + message: impl Into>, + ) { + if !self.is_open(target) { + return; + } + + trace!( + target: "sub-libp2p", + "External API => Notification for {:?} with protocol {:?}", + target, + str::from_utf8(&protocol_name) + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); + + self.events.push(NetworkBehaviourAction::SendEvent { + peer_id: target.clone(), + event: NotifsHandlerIn::SendNotification { + message: message.into(), + engine_id, + protocol_name, + }, + }); + } + /// Sends a message to a peer. /// /// Has no effect if the custom protocol is not open with the given peer. 
@@ -354,7 +452,7 @@ impl LegacyProto { trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: target.clone(), - event: CustomProtoHandlerIn::SendCustomMessage { + event: NotifsHandlerIn::SendLegacy { message, } }); @@ -416,7 +514,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: occ_entry.key().clone(), - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *occ_entry.into_mut() = PeerState::Enabled { connected_point, open }; }, @@ -434,7 +532,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: occ_entry.key().clone(), - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *occ_entry.into_mut() = PeerState::Enabled { connected_point, open: false }; }, @@ -491,7 +589,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: entry.key().clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); *entry.into_mut() = PeerState::Disabled { open, connected_point, banned_until: None } }, @@ -555,7 +653,7 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: incoming.peer_id, - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *state = PeerState::Enabled { open: false, connected_point }; @@ -597,13 +695,13 @@ impl LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: incoming.peer_id, - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); *state = PeerState::Disabled { open: false, connected_point, banned_until: None }; } } -impl DiscoveryNetBehaviour for LegacyProto { +impl DiscoveryNetBehaviour for GenericProto { fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { self.peerset.discovered(peer_ids.into_iter().map(|peer_id| { debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); @@ -612,12 +710,12 @@ impl DiscoveryNetBehaviour for LegacyProto { } } -impl NetworkBehaviour for LegacyProto { - type ProtocolsHandler = CustomProtoHandlerProto; - type OutEvent = LegacyProtoOut; +impl NetworkBehaviour for GenericProto { + type ProtocolsHandler = NotifsHandlerProto; + type OutEvent = GenericProtoOut; fn new_handler(&mut self) -> Self::ProtocolsHandler { - CustomProtoHandlerProto::new(self.protocol.clone()) + NotifsHandlerProto::new(self.legacy_protocol.clone(), self.notif_protocols.clone()) } fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { @@ -634,7 +732,7 @@ impl NetworkBehaviour for LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *st = PeerState::Enabled { open: false, connected_point }; } @@ -677,7 +775,7 @@ impl NetworkBehaviour for LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); *st = 
PeerState::Disabled { open: false, connected_point, banned_until }; } @@ -707,7 +805,7 @@ impl NetworkBehaviour for LegacyProto { } if open { debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = LegacyProtoOut::CustomProtocolClosed { + let event = GenericProtoOut::CustomProtocolClosed { peer_id: peer_id.clone(), reason: "Disconnected by libp2p".into(), }; @@ -724,7 +822,7 @@ impl NetworkBehaviour for LegacyProto { self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); if open { debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = LegacyProtoOut::CustomProtocolClosed { + let event = GenericProtoOut::CustomProtocolClosed { peer_id: peer_id.clone(), reason: "Disconnected by libp2p".into(), }; @@ -746,7 +844,7 @@ impl NetworkBehaviour for LegacyProto { if open { debug!(target: "sub-libp2p", "External API <= Closed({:?})", peer_id); - let event = LegacyProtoOut::CustomProtocolClosed { + let event = GenericProtoOut::CustomProtocolClosed { peer_id: peer_id.clone(), reason: "Disconnected by libp2p".into(), }; @@ -817,10 +915,10 @@ impl NetworkBehaviour for LegacyProto { fn inject_node_event( &mut self, source: PeerId, - event: CustomProtoHandlerOut, + event: NotifsHandlerOut, ) { match event { - CustomProtoHandlerOut::CustomProtocolClosed { reason } => { + NotifsHandlerOut::Closed { reason } => { debug!(target: "sub-libp2p", "Handler({:?}) => Closed: {}", source, reason); let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { @@ -831,7 +929,7 @@ impl NetworkBehaviour for LegacyProto { }; debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); - let event = LegacyProtoOut::CustomProtocolClosed { + let event = GenericProtoOut::CustomProtocolClosed { reason, peer_id: source.clone(), }; @@ -847,7 +945,7 @@ impl NetworkBehaviour for LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: source.clone(), - event: CustomProtoHandlerIn::Disable, + event: NotifsHandlerIn::Disable, }); *entry.into_mut() = PeerState::Disabled { @@ -873,8 +971,8 @@ impl NetworkBehaviour for LegacyProto { } } - CustomProtoHandlerOut::CustomProtocolOpen { version } => { - debug!(target: "sub-libp2p", "Handler({:?}) => Open: version {:?}", source, version); + NotifsHandlerOut::Open => { + debug!(target: "sub-libp2p", "Handler({:?}) => Open", source); let endpoint = match self.peers.get_mut(&source) { Some(PeerState::Enabled { ref mut open, ref connected_point }) | Some(PeerState::DisabledPendingEnable { ref mut open, ref connected_point, .. 
}) | @@ -889,8 +987,7 @@ impl NetworkBehaviour for LegacyProto { }; debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = LegacyProtoOut::CustomProtocolOpen { - version, + let event = GenericProtoOut::CustomProtocolOpen { peer_id: source, endpoint, }; @@ -898,11 +995,11 @@ impl NetworkBehaviour for LegacyProto { self.events.push(NetworkBehaviourAction::GenerateEvent(event)); } - CustomProtoHandlerOut::CustomMessage { message } => { + NotifsHandlerOut::CustomMessage { message } => { debug_assert!(self.is_open(&source)); trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = LegacyProtoOut::CustomMessage { + let event = GenericProtoOut::CustomMessage { peer_id: source, message, }; @@ -910,25 +1007,50 @@ impl NetworkBehaviour for LegacyProto { self.events.push(NetworkBehaviourAction::GenerateEvent(event)); } - CustomProtoHandlerOut::Clogged { messages } => { + NotifsHandlerOut::Notification { protocol_name, engine_id, message } => { + debug_assert!(self.is_open(&source)); + trace!( + target: "sub-libp2p", + "Handler({:?}) => Notification({:?})", + source, + str::from_utf8(&protocol_name) + ); + trace!(target: "sub-libp2p", "External API <= Message({:?})", source); + let event = GenericProtoOut::CustomMessage { + peer_id: source, + message: { + let message = GenericMessage::<(), (), (), ()>::Consensus(ConsensusMessage { + engine_id, + data: message.to_vec(), + }); + + // Note that we clone `message` here. + From::from(&message.encode()[..]) + }, + }; + + self.events.push(NetworkBehaviourAction::GenerateEvent(event)); + } + + NotifsHandlerOut::Clogged { messages } => { debug_assert!(self.is_open(&source)); trace!(target: "sub-libp2p", "Handler({:?}) => Clogged", source); trace!(target: "sub-libp2p", "External API <= Clogged({:?})", source); warn!(target: "sub-libp2p", "Queue of packets to send to {:?} is \ pretty large", source); - self.events.push(NetworkBehaviourAction::GenerateEvent(LegacyProtoOut::Clogged { + self.events.push(NetworkBehaviourAction::GenerateEvent(GenericProtoOut::Clogged { peer_id: source, messages, })); } // Don't do anything for non-severe errors except report them. - CustomProtoHandlerOut::ProtocolError { is_severe, ref error } if !is_severe => { + NotifsHandlerOut::ProtocolError { is_severe, ref error } if !is_severe => { debug!(target: "sub-libp2p", "Handler({:?}) => Benign protocol error: {:?}", source, error) } - CustomProtoHandlerOut::ProtocolError { error, .. } => { + NotifsHandlerOut::ProtocolError { error, .. 
} => { debug!(target: "sub-libp2p", "Handler({:?}) => Severe protocol error: {:?}", source, error); // A severe protocol error happens when we detect a "bad" node, such as a node on @@ -950,7 +1072,7 @@ impl NetworkBehaviour for LegacyProto { _params: &mut impl PollParameters, ) -> Poll< NetworkBehaviourAction< - CustomProtoHandlerIn, + NotifsHandlerIn, Self::OutEvent, >, > { @@ -1005,7 +1127,7 @@ impl NetworkBehaviour for LegacyProto { debug!(target: "sub-libp2p", "Handler({:?}) <= Enable now that ban has expired", peer_id); self.events.push(NetworkBehaviourAction::SendEvent { peer_id: peer_id.clone(), - event: CustomProtoHandlerIn::Enable, + event: NotifsHandlerIn::Enable, }); *peer_state = PeerState::Enabled { connected_point, open }; } diff --git a/client/cli/src/traits.rs b/client/network/src/protocol/generic_proto/handler.rs similarity index 73% rename from client/cli/src/traits.rs rename to client/network/src/protocol/generic_proto/handler.rs index 96216a172b9700059f7ca4622cc4be9d582c93fa..e97176cfbbfbb98cfa6f1f1d2f25dc9f5f777990 100644 --- a/client/cli/src/traits.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -1,4 +1,4 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. +// Copyright 2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. // Substrate is free software: you can redistribute it and/or modify @@ -14,10 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::params::SharedParams; +pub use self::group::{NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut}; -/// Supports getting common params. -pub trait GetSharedParams { - /// Returns shared params if any. - fn shared_params(&self) -> Option<&SharedParams>; -} +mod group; +mod legacy; +mod notif_in; +mod notif_out; diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs new file mode 100644 index 0000000000000000000000000000000000000000..d6d9919d3e14df0c2261731cddb0811aa291f920 --- /dev/null +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -0,0 +1,523 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for both incoming +//! and outgoing substreams for all gossiping protocols together. +//! +//! This is the main implementation of `ProtocolsHandler` in this crate, that handles all the +//! protocols that are Substrate-related and outside of the scope of libp2p. +//! +//! # Usage +//! +//! The handler can be in one of the following states: `Initial`, `Enabled`, `Disabled`. +//! +//! The `Initial` state is the state that the handler initially is in. It is a temporary state +//! during which the user must either enable or disable the handler. After that, the handler stays +//! 
either enabled or disabled. +//! +//! On the wire, we try to open the following substreams: +//! +//! - One substream for each notification protocol passed as parameter to the +//! `NotifsHandlerProto::new` function. +//! - One "legacy" substream used for anything non-related to gossiping, and used as a fallback +//! in case the notification protocol can't be opened. +//! +//! When the handler is in the `Enabled` state, we immediately open and try to maintain all the +//! aforementioned substreams. When the handler is in the `Disabled` state, we immediately close +//! (or abort opening) all these substreams. It is intended that in the future we allow states in +//! which some protocols are open and not others. Symmetrically, we allow incoming +//! Substrate-related substreams if and only if we are in the `Enabled` state. +//! +//! The user has the choice between sending a message with `SendNotification`, to send a +//! notification, and `SendLegacy`, to send any other kind of message. +//! + +use crate::protocol::generic_proto::{ + handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut}, + handler::notif_in::{NotifsInHandlerProto, NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut}, + handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, + upgrade::{NotificationsIn, NotificationsOut, NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec}, +}; +use crate::protocol::message::generic::{Message as GenericMessage, ConsensusMessage}; + +use bytes::BytesMut; +use codec::Encode as _; +use libp2p::core::{either::{EitherError, EitherOutput}, ConnectedPoint, PeerId}; +use libp2p::core::upgrade::{EitherUpgrade, UpgradeError, SelectUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, +}; +use log::error; +use sp_runtime::ConsensusEngineId; +use std::{borrow::Cow, error, io, task::{Context, Poll}}; + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsHandler`]. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandlerProto { + /// Prototypes for handlers for inbound substreams. + in_handlers: Vec<(NotifsInHandlerProto, ConsensusEngineId)>, + + /// Prototypes for handlers for outbound substreams. + out_handlers: Vec<(NotifsOutHandlerProto, ConsensusEngineId)>, + + /// Prototype for handler for backwards-compatibility. + legacy: LegacyProtoHandlerProto, +} + +/// The actual handler once the connection has been established. +/// +/// See the documentation at the module level for more information. +pub struct NotifsHandler { + /// Handlers for inbound substreams. + in_handlers: Vec<(NotifsInHandler, ConsensusEngineId)>, + + /// Handlers for outbound substreams. + out_handlers: Vec<(NotifsOutHandler, ConsensusEngineId)>, + + /// Handler for backwards-compatibility. + legacy: LegacyProtoHandler, + + /// State of this handler. + enabled: EnabledState, + + /// If we receive inbound substream requests while in initialization mode, + /// we push the corresponding index here and process them when the handler + /// gets enabled/disabled. 
+ pending_in: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +enum EnabledState { + Initial, + Enabled, + Disabled, +} + +impl IntoProtocolsHandler for NotifsHandlerProto { + type Handler = NotifsHandler; + + fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { + let in_handlers = self.in_handlers.iter() + .map(|(h, _)| h.inbound_protocol()) + .collect::>(); + + SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) + } + + fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { + NotifsHandler { + in_handlers: self.in_handlers + .into_iter() + .map(|(p, e)| (p.into_handler(remote_peer_id, connected_point), e)) + .collect(), + out_handlers: self.out_handlers + .into_iter() + .map(|(p, e)| (p.into_handler(remote_peer_id, connected_point), e)) + .collect(), + legacy: self.legacy.into_handler(remote_peer_id, connected_point), + enabled: EnabledState::Initial, + pending_in: Vec::new(), + } + } +} + +/// Event that can be received by a `NotifsHandler`. +#[derive(Debug)] +pub enum NotifsHandlerIn { + /// The node should start using custom protocols. + Enable, + + /// The node should stop using custom protocols. + Disable, + + /// Sends a message through the custom protocol substream. + /// + /// > **Note**: This must **not** be an encoded `ConsensusMessage` message. + SendLegacy { + /// The message to send. + message: Vec, + }, + + /// Sends a notifications message. + SendNotification { + /// Name of the protocol for the message. + /// + /// Must match one of the registered protocols. For backwards-compatibility reasons, if + /// the remote doesn't support this protocol, we use the legacy substream to send a + /// `ConsensusMessage` message. + protocol_name: Cow<'static, [u8]>, + + /// The engine ID to use, in case we need to send this message over the legacy substream. + /// + /// > **Note**: Ideally this field wouldn't be necessary, and we would deduce the engine + /// > ID from the existing handlers. However, it is possible (especially in test + /// > situations) that we open connections before all the notification protocols + /// > have been registered, in which case we always rely on the legacy substream. + engine_id: ConsensusEngineId, + + /// The message to send. + message: Vec, + }, +} + +/// Event that can be emitted by a `NotifsHandler`. +#[derive(Debug)] +pub enum NotifsHandlerOut { + /// Opened the substreams with the remote. + Open, + + /// Closed the substreams with the remote. + Closed { + /// Reason why the substream closed, for diagnostic purposes. + reason: Cow<'static, str>, + }, + + /// Received a non-gossiping message on the legacy substream. + CustomMessage { + /// Message that has been received. + /// + /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a + /// notification. + message: BytesMut, + }, + + /// Received a message on a custom protocol substream. + Notification { + /// Engine corresponding to the message. + protocol_name: Cow<'static, [u8]>, + + /// For legacy reasons, the name to use if we had received the message from the legacy + /// substream. + engine_id: ConsensusEngineId, + + /// Message that has been received. + /// + /// If `protocol_name` is `None`, this decodes to a `Message`. If `protocol_name` is `Some`, + /// this is directly a gossiping message. + message: BytesMut, + }, + + /// A substream to the remote is clogged. The send buffer is very large, and we should print + /// a diagnostic message and/or avoid sending more data. 
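Editor's note: the `enabled` and `pending_in` fields above implement the behaviour described in the module docs: inbound substream requests that arrive before the behaviour has enabled or disabled the handler are parked and only answered once that decision is known. A minimal model of that bookkeeping (illustrative types only, not the real handler):

enum EnabledState {
    Initial,
    Enabled,
    Disabled,
}

struct InboundGate {
    enabled: EnabledState,
    // Indices of in-handlers whose `OpenRequest` is still waiting for an answer.
    pending_in: Vec<usize>,
}

impl InboundGate {
    // Returns `Some(true)` to accept now, `Some(false)` to refuse now, `None` to defer.
    fn on_open_request(&mut self, handler_index: usize) -> Option<bool> {
        match self.enabled {
            EnabledState::Initial => {
                self.pending_in.push(handler_index);
                None
            }
            EnabledState::Enabled => Some(true),
            EnabledState::Disabled => Some(false),
        }
    }

    // Called when the behaviour enables the handler: all parked requests get accepted.
    fn enable(&mut self) -> Vec<usize> {
        self.enabled = EnabledState::Enabled;
        std::mem::take(&mut self.pending_in)
    }
}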
+ Clogged { + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec>, + }, + + /// An error has happened on the protocol level with this node. + ProtocolError { + /// If true the error is severe, such as a protocol violation. + is_severe: bool, + /// The error that happened. + error: Box, + }, +} + +impl NotifsHandlerProto { + /// Builds a new handler. + pub fn new(legacy: RegisteredProtocol, list: impl Into, ConsensusEngineId, Vec)>>) -> Self { + let list = list.into(); + + NotifsHandlerProto { + in_handlers: list.clone().into_iter().map(|(p, e, _)| (NotifsInHandlerProto::new(p), e)).collect(), + out_handlers: list.clone().into_iter().map(|(p, e, _)| (NotifsOutHandlerProto::new(p), e)).collect(), + legacy: LegacyProtoHandlerProto::new(legacy), + } + } +} + +impl ProtocolsHandler for NotifsHandler { + type InEvent = NotifsHandlerIn; + type OutEvent = NotifsHandlerOut; + type Error = EitherError< + EitherError< + ::Error, + ::Error, + >, + ::Error, + >; + type InboundProtocol = SelectUpgrade, RegisteredProtocol>; + type OutboundProtocol = EitherUpgrade; + // Index within the `out_handlers`; None for legacy + type OutboundOpenInfo = Option; + + fn listen_protocol(&self) -> SubstreamProtocol { + let in_handlers = self.in_handlers.iter() + .map(|h| h.0.listen_protocol().into_upgrade().1) + .collect::>(); + + let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); + SubstreamProtocol::new(proto) + } + + fn inject_fully_negotiated_inbound( + &mut self, + out: >::Output + ) { + match out { + EitherOutput::First((out, num)) => + self.in_handlers[num].0.inject_fully_negotiated_inbound(out), + EitherOutput::Second(out) => + self.legacy.inject_fully_negotiated_inbound(out), + } + } + + fn inject_fully_negotiated_outbound( + &mut self, + out: >::Output, + num: Self::OutboundOpenInfo + ) { + match (out, num) { + (EitherOutput::First(out), Some(num)) => + self.out_handlers[num].0.inject_fully_negotiated_outbound(out, ()), + (EitherOutput::Second(out), None) => + self.legacy.inject_fully_negotiated_outbound(out, ()), + _ => error!("inject_fully_negotiated_outbound called with wrong parameters"), + } + } + + fn inject_event(&mut self, message: NotifsHandlerIn) { + match message { + NotifsHandlerIn::Enable => { + self.enabled = EnabledState::Enabled; + self.legacy.inject_event(LegacyProtoHandlerIn::Enable); + for (handler, _) in &mut self.out_handlers { + handler.inject_event(NotifsOutHandlerIn::Enable { + initial_message: vec![] + }); + } + for num in self.pending_in.drain(..) { + self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Accept(vec![])); + } + }, + NotifsHandlerIn::Disable => { + self.legacy.inject_event(LegacyProtoHandlerIn::Disable); + // The notifications protocols start in the disabled state. If we were in the + // "Initial" state, then we shouldn't disable the notifications protocols again. + if self.enabled != EnabledState::Initial { + for (handler, _) in &mut self.out_handlers { + handler.inject_event(NotifsOutHandlerIn::Disable); + } + } + self.enabled = EnabledState::Disabled; + for num in self.pending_in.drain(..) { + self.in_handlers[num].0.inject_event(NotifsInHandlerIn::Refuse); + } + }, + NotifsHandlerIn::SendLegacy { message } => + self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }), + NotifsHandlerIn::SendNotification { message, engine_id, protocol_name } => { + for (handler, ngn_id) in &mut self.out_handlers { + if handler.protocol_name() != &protocol_name[..] 
{ + break; + } + + if handler.is_open() { + handler.inject_event(NotifsOutHandlerIn::Send(message)); + return; + } else { + debug_assert_eq!(engine_id, *ngn_id); + } + } + + let message = GenericMessage::<(), (), (), ()>::Consensus(ConsensusMessage { + engine_id, + data: message, + }); + + self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { + message: message.encode() + }); + }, + } + } + + fn inject_dial_upgrade_error( + &mut self, + num: Option, + err: ProtocolsHandlerUpgrErr> + ) { + match (err, num) { + (ProtocolsHandlerUpgrErr::Timeout, Some(num)) => + self.out_handlers[num].0.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Timeout + ), + (ProtocolsHandlerUpgrErr::Timeout, None) => + self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout), + (ProtocolsHandlerUpgrErr::Timer, Some(num)) => + self.out_handlers[num].0.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Timer + ), + (ProtocolsHandlerUpgrErr::Timer, None) => + self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), Some(num)) => + self.out_handlers[num].0.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), None) => + self.legacy.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))), Some(num)) => + self.out_handlers[num].0.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(err))), None) => + self.legacy.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) + ), + _ => error!("inject_dial_upgrade_error called with bad parameters"), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + // Iterate over each handler and return the maximum value. + + let mut ret = self.legacy.connection_keep_alive(); + if ret.is_yes() { + return KeepAlive::Yes; + } + + for (handler, _) in &self.in_handlers { + let val = handler.connection_keep_alive(); + if val.is_yes() { + return KeepAlive::Yes; + } + if ret < val { ret = val; } + } + + for (handler, _) in &self.out_handlers { + let val = handler.connection_keep_alive(); + if val.is_yes() { + return KeepAlive::Yes; + } + if ret < val { ret = val; } + } + + ret + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent + > { + for (handler_num, (handler, engine_id)) in self.in_handlers.iter_mut().enumerate() { + while let Poll::Ready(ev) = handler.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => + error!("Incoming substream handler tried to open a substream"), + ProtocolsHandlerEvent::Close(err) => void::unreachable(err), + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => + match self.enabled { + EnabledState::Initial => self.pending_in.push(handler_num), + EnabledState::Enabled => + handler.inject_event(NotifsInHandlerIn::Accept(vec![])), + EnabledState::Disabled => + handler.inject_event(NotifsInHandlerIn::Refuse), + }, + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { + // Note that right now the legacy substream has precedence over + // everything. 
If it is not open, then we consider that nothing is open. + if self.legacy.is_open() { + let msg = NotifsHandlerOut::Notification { + message, + engine_id: *engine_id, + protocol_name: handler.protocol_name().to_owned().into(), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); + } + }, + } + } + } + + for (handler_num, (handler, _)) in self.out_handlers.iter_mut().enumerate() { + while let Poll::Ready(ev) = handler.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol.map_upgrade(EitherUpgrade::A), + info: Some(handler_num), + }), + ProtocolsHandlerEvent::Close(err) => void::unreachable(err), + + // At the moment we don't actually care whether any notifications protocol + // opens or closes. + // Whether our communications with the remote are open or closed entirely + // depends on the legacy substream, because as long as we are open the user of + // this struct might try to send legacy protocol messages which we need to + // deliver for things to work properly. + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }) => {}, + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {}, + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {}, + } + } + } + + while let Poll::Ready(ev) = self.legacy.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol.map_upgrade(EitherUpgrade::B), + info: None, + }), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { .. }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Open + )), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { reason }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Closed { reason } + )), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message } + )), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::Clogged { messages }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::Clogged { messages } + )), + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::ProtocolError { is_severe, error } + )), + ProtocolsHandlerEvent::Close(err) => + return Poll::Ready(ProtocolsHandlerEvent::Close(EitherError::B(err))), + } + } + + Poll::Pending + } +} diff --git a/client/network/src/protocol/legacy_proto/handler.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs similarity index 90% rename from client/network/src/protocol/legacy_proto/handler.rs rename to client/network/src/protocol/generic_proto/handler/legacy.rs index e3490993dd46d05549e656b124883f1976a4d062..a2d2fc9246d1c79b761d5e5eb1ce375fba33b49a 100644 --- a/client/network/src/protocol/legacy_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler/legacy.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
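Editor's note: a side remark on the grouping handler's `connection_keep_alive` above: it short-circuits to `Yes` as soon as any sub-handler wants the connection kept alive, and otherwise keeps the largest (latest) value among them. A self-contained sketch of that combination rule, with a stand-in for libp2p's `KeepAlive`:

use std::time::Instant;

// Illustrative stand-in for libp2p's `KeepAlive`; variant order gives `No < Until(_) < Yes`.
#[derive(PartialEq, PartialOrd)]
enum KeepAlive {
    No,
    Until(Instant),
    Yes,
}

fn combined_keep_alive(handlers: impl IntoIterator<Item = KeepAlive>) -> KeepAlive {
    let mut ret = KeepAlive::No;
    for ka in handlers {
        if ka == KeepAlive::Yes {
            // Any handler wanting to stay alive wins immediately.
            return KeepAlive::Yes;
        }
        if ret < ka {
            ret = ka;
        }
    }
    ret
}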
-use super::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; +use crate::protocol::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; use bytes::BytesMut; use futures::prelude::*; use futures_timer::Delay; @@ -37,7 +37,7 @@ use std::{pin::Pin, task::{Context, Poll}}; /// /// Every time a connection with a remote starts, an instance of this struct is created and /// sent to a background task dedicated to this connection. Once the connection is established, -/// it is turned into a `CustomProtoHandler`. It then handles all communications that are specific +/// it is turned into a `LegacyProtoHandler`. It then handles all communications that are specific /// to Substrate on that single connection. /// /// Note that there can be multiple instance of this struct simultaneously for same peer. However @@ -87,29 +87,29 @@ use std::{pin::Pin, task::{Context, Poll}}; /// We consider that we are now "closed" if the remote closes all the existing substreams. /// Re-opening it can then be performed by closing all active substream and re-opening one. /// -pub struct CustomProtoHandlerProto { +pub struct LegacyProtoHandlerProto { /// Configuration for the protocol upgrade to negotiate. protocol: RegisteredProtocol, } -impl CustomProtoHandlerProto { - /// Builds a new `CustomProtoHandlerProto`. +impl LegacyProtoHandlerProto { + /// Builds a new `LegacyProtoHandlerProto`. pub fn new(protocol: RegisteredProtocol) -> Self { - CustomProtoHandlerProto { + LegacyProtoHandlerProto { protocol, } } } -impl IntoProtocolsHandler for CustomProtoHandlerProto { - type Handler = CustomProtoHandler; +impl IntoProtocolsHandler for LegacyProtoHandlerProto { + type Handler = LegacyProtoHandler; fn inbound_protocol(&self) -> RegisteredProtocol { self.protocol.clone() } fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { - CustomProtoHandler { + LegacyProtoHandler { protocol: self.protocol, endpoint: connected_point.to_endpoint(), remote_peer_id: remote_peer_id.clone(), @@ -123,7 +123,7 @@ impl IntoProtocolsHandler for CustomProtoHandlerProto { } /// The actual handler once the connection has been established. -pub struct CustomProtoHandler { +pub struct LegacyProtoHandler { /// Configuration for the protocol upgrade to negotiate. protocol: RegisteredProtocol, @@ -142,7 +142,7 @@ pub struct CustomProtoHandler { /// /// This queue must only ever be modified to insert elements at the back, or remove the first /// element. - events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, + events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, } /// State of the handler. @@ -195,9 +195,9 @@ enum ProtocolState { Poisoned, } -/// Event that can be received by a `CustomProtoHandler`. +/// Event that can be received by a `LegacyProtoHandler`. #[derive(Debug)] -pub enum CustomProtoHandlerIn { +pub enum LegacyProtoHandlerIn { /// The node should start using custom protocols. Enable, @@ -211,9 +211,9 @@ pub enum CustomProtoHandlerIn { }, } -/// Event that can be emitted by a `CustomProtoHandler`. +/// Event that can be emitted by a `LegacyProtoHandler`. #[derive(Debug)] -pub enum CustomProtoHandlerOut { +pub enum LegacyProtoHandlerOut { /// Opened a custom protocol with the remote. CustomProtocolOpen { /// Version of the protocol that has been opened. 
@@ -248,7 +248,19 @@ pub enum CustomProtoHandlerOut { }, } -impl CustomProtoHandler { +impl LegacyProtoHandler { + /// Returns true if the legacy substream is currently open. + pub fn is_open(&self) -> bool { + match &self.state { + ProtocolState::Init { substreams, .. } => !substreams.is_empty(), + ProtocolState::Opening { .. } => false, + ProtocolState::Normal { substreams, .. } => !substreams.is_empty(), + ProtocolState::Disabled { .. } => false, + ProtocolState::KillAsap => false, + ProtocolState::Poisoned => false, + } + } + /// Enables the handler. fn enable(&mut self) { self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { @@ -271,7 +283,7 @@ impl CustomProtoHandler { } } else { - let event = CustomProtoHandlerOut::CustomProtocolOpen { + let event = LegacyProtoHandlerOut::CustomProtocolOpen { version: incoming[0].protocol_version() }; self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); @@ -325,7 +337,7 @@ impl CustomProtoHandler { /// Polls the state for events. Optionally returns an event to produce. #[must_use] fn poll_state(&mut self, cx: &mut Context) - -> Option> { + -> Option> { match mem::replace(&mut self.state, ProtocolState::Poisoned) { ProtocolState::Poisoned => { error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", @@ -352,7 +364,7 @@ impl CustomProtoHandler { match Pin::new(&mut deadline).poll(cx) { Poll::Ready(()) => { deadline = Delay::new(Duration::from_secs(60)); - let event = CustomProtoHandlerOut::ProtocolError { + let event = LegacyProtoHandlerOut::ProtocolError { is_severe: true, error: "Timeout when opening protocol".to_string().into(), }; @@ -372,7 +384,7 @@ impl CustomProtoHandler { match Pin::new(&mut substream).poll_next(cx) { Poll::Pending => substreams.push(substream), Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { - let event = CustomProtoHandlerOut::CustomMessage { + let event = LegacyProtoHandlerOut::CustomMessage { message }; substreams.push(substream); @@ -380,7 +392,7 @@ impl CustomProtoHandler { return Some(ProtocolsHandlerEvent::Custom(event)); }, Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged { messages }))) => { - let event = CustomProtoHandlerOut::Clogged { + let event = LegacyProtoHandlerOut::Clogged { messages, }; substreams.push(substream); @@ -390,7 +402,7 @@ impl CustomProtoHandler { Poll::Ready(None) => { shutdown.push(substream); if substreams.is_empty() { - let event = CustomProtoHandlerOut::CustomProtocolClosed { + let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: "All substreams have been closed by the remote".into(), }; self.state = ProtocolState::Disabled { @@ -402,7 +414,7 @@ impl CustomProtoHandler { } Poll::Ready(Some(Err(err))) => { if substreams.is_empty() { - let event = CustomProtoHandlerOut::CustomProtocolClosed { + let event = LegacyProtoHandlerOut::CustomProtocolClosed { reason: format!("Error on the last substream: {:?}", err).into(), }; self.state = ProtocolState::Disabled { @@ -466,7 +478,7 @@ impl CustomProtoHandler { } ProtocolState::Opening { .. 
} => { - let event = CustomProtoHandlerOut::CustomProtocolOpen { + let event = LegacyProtoHandlerOut::CustomProtocolOpen { version: substream.protocol_version() }; self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); @@ -503,9 +515,9 @@ impl CustomProtoHandler { } } -impl ProtocolsHandler for CustomProtoHandler { - type InEvent = CustomProtoHandlerIn; - type OutEvent = CustomProtoHandlerOut; +impl ProtocolsHandler for LegacyProtoHandler { + type InEvent = LegacyProtoHandlerIn; + type OutEvent = LegacyProtoHandlerOut; type Error = ConnectionKillError; type InboundProtocol = RegisteredProtocol; type OutboundProtocol = RegisteredProtocol; @@ -530,11 +542,11 @@ impl ProtocolsHandler for CustomProtoHandler { self.inject_fully_negotiated(proto); } - fn inject_event(&mut self, message: CustomProtoHandlerIn) { + fn inject_event(&mut self, message: LegacyProtoHandlerIn) { match message { - CustomProtoHandlerIn::Disable => self.disable(), - CustomProtoHandlerIn::Enable => self.enable(), - CustomProtoHandlerIn::SendCustomMessage { message } => + LegacyProtoHandlerIn::Disable => self.disable(), + LegacyProtoHandlerIn::Enable => self.enable(), + LegacyProtoHandlerIn::SendCustomMessage { message } => self.send_message(message), } } @@ -546,7 +558,7 @@ impl ProtocolsHandler for CustomProtoHandler { _ => false, }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(CustomProtoHandlerOut::ProtocolError { + self.events_queue.push(ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error: Box::new(err), })); @@ -587,9 +599,9 @@ impl ProtocolsHandler for CustomProtoHandler { } } -impl fmt::Debug for CustomProtoHandler { +impl fmt::Debug for LegacyProtoHandler { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("CustomProtoHandler") + f.debug_struct("LegacyProtoHandler") .finish() } } diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs new file mode 100644 index 0000000000000000000000000000000000000000..4e16fb1af419f4aebb2748b09c477119f1165309 --- /dev/null +++ b/client/network/src/protocol/generic_proto/handler/notif_in.rs @@ -0,0 +1,256 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for ingoing +//! substreams for a single gossiping protocol. +//! +//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple +//! > protocols, you need to create multiple instances and group them. +//! 
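The inbound handler added in this new file exposes a small request/response surface: every `NotifsInHandlerOut::OpenRequest` must be answered with either `NotifsInHandlerIn::Accept` (carrying our handshake) or `NotifsInHandlerIn::Refuse`, after which `Notif` and `Closed` events may follow. A hedged sketch of one possible answering policy; the handshake payload, the acceptance rule, and the command queue are invented for the example and are not part of the diff:

```rust
// Illustrative only: assumes `NotifsInHandlerIn` / `NotifsInHandlerOut`
// (defined further down in this file) are in scope. The `commands` vector
// stands in for however the enclosing behaviour feeds replies back.
fn answer_inbound_event(event: NotifsInHandlerOut, commands: &mut Vec<NotifsInHandlerIn>) {
    match event {
        // Must always be answered, even if a `Closed` arrives in the meantime.
        NotifsInHandlerOut::OpenRequest(remote_handshake) => {
            let have_free_slot = remote_handshake.len() <= 1024; // made-up policy
            if have_free_slot {
                commands.push(NotifsInHandlerIn::Accept(b"our handshake".to_vec()));
            } else {
                commands.push(NotifsInHandlerIn::Refuse);
            }
        }
        // Only possible after an `Accept` and before a `Closed`.
        NotifsInHandlerOut::Notif(bytes) =>
            log::trace!("notification of {} bytes", bytes.len()),
        NotifsInHandlerOut::Closed =>
            log::debug!("inbound notifications substream closed"),
    }
}
```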
+ +use crate::protocol::generic_proto::upgrade::{NotificationsIn, NotificationsInSubstream}; +use bytes::BytesMut; +use futures::prelude::*; +use libp2p::core::{ConnectedPoint, PeerId}; +use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, +}; +use log::{error, warn}; +use smallvec::SmallVec; +use std::{borrow::Cow, fmt, pin::Pin, str, task::{Context, Poll}}; + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsInHandler`]. +pub struct NotifsInHandlerProto { + /// Configuration for the protocol upgrade to negotiate. + in_protocol: NotificationsIn, +} + +/// The actual handler once the connection has been established. +pub struct NotifsInHandler { + /// Configuration for the protocol upgrade to negotiate for inbound substreams. + in_protocol: NotificationsIn, + + /// Substream that is open with the remote. + substream: Option>, + + /// If the substream is opened and closed rapidly, we can emit several `OpenRequest` and + /// `Closed` messages in a row without the handler having time to respond with `Accept` or + /// `Refuse`. + /// + /// In order to keep the state consistent, we increment this variable every time an + /// `OpenRequest` is emitted and decrement it every time an `Accept` or `Refuse` is received. + pending_accept_refuses: usize, + + /// Queue of events to send to the outside. + /// + /// This queue is only ever modified to insert elements at the back, or remove the first + /// element. + events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, +} + +/// Event that can be received by a `NotifsInHandler`. +#[derive(Debug)] +pub enum NotifsInHandlerIn { + /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send + /// to the remote. + /// + /// After sending this to the handler, the substream is now considered open and `Notif` events + /// can be received. + Accept(Vec), + + /// Can be sent back as a response to an `OpenRequest`. + Refuse, +} + +/// Event that can be emitted by a `NotifsInHandler`. +#[derive(Debug)] +pub enum NotifsInHandlerOut { + /// The remote wants to open a substream. Contains the initial message sent by the remote + /// when the substream has been opened. + /// + /// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent + /// back even if a `Closed` is received. + OpenRequest(Vec), + + /// The notifications substream has been closed by the remote. In order to avoid race + /// conditions, this does **not** cancel any previously-sent `OpenRequest`. + Closed, + + /// Received a message on the notifications substream. + /// + /// Can only happen after an `Accept` and before a `Closed`. + Notif(BytesMut), +} + +impl NotifsInHandlerProto { + /// Builds a new `NotifsInHandlerProto`. 
+ pub fn new( + protocol_name: impl Into> + ) -> Self { + NotifsInHandlerProto { + in_protocol: NotificationsIn::new(protocol_name), + } + } +} + +impl IntoProtocolsHandler for NotifsInHandlerProto { + type Handler = NotifsInHandler; + + fn inbound_protocol(&self) -> NotificationsIn { + self.in_protocol.clone() + } + + fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { + NotifsInHandler { + in_protocol: self.in_protocol, + substream: None, + pending_accept_refuses: 0, + events_queue: SmallVec::new(), + } + } +} + +impl NotifsInHandler { + /// Returns the name of the protocol that we accept. + pub fn protocol_name(&self) -> &[u8] { + self.in_protocol.protocol_name() + } +} + +impl ProtocolsHandler for NotifsInHandler { + type InEvent = NotifsInHandlerIn; + type OutEvent = NotifsInHandlerOut; + type Error = void::Void; + type InboundProtocol = NotificationsIn; + type OutboundProtocol = DeniedUpgrade; + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(self.in_protocol.clone()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + (msg, proto): >::Output + ) { + if self.substream.is_some() { + warn!( + target: "sub-libp2p", + "Received duplicate inbound notifications substream for {:?}", + str::from_utf8(self.in_protocol.protocol_name()), + ); + return; + } + + self.substream = Some(proto); + self.events_queue.push(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg))); + self.pending_accept_refuses = self.pending_accept_refuses + .checked_add(1) + .unwrap_or_else(|| { + error!(target: "sub-libp2p", "Overflow in pending_accept_refuses"); + usize::max_value() + }); + } + + fn inject_fully_negotiated_outbound( + &mut self, + out: >::Output, + _: Self::OutboundOpenInfo + ) { + // We never emit any outgoing substream. + void::unreachable(out) + } + + fn inject_event(&mut self, message: NotifsInHandlerIn) { + self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) { + Some(v) => v, + None => { + error!( + target: "sub-libp2p", + "Inconsistent state: received Accept/Refuse when no pending request exists" + ); + return; + } + }; + + // If we send multiple `OpenRequest`s in a row, we will receive back multiple + // `Accept`/`Refuse` messages. All of them are obsolete except the last one. + if self.pending_accept_refuses != 0 { + return; + } + + match (message, self.substream.as_mut()) { + (NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message), + (NotifsInHandlerIn::Accept(_), None) => {}, + (NotifsInHandlerIn::Refuse, _) => self.substream = None, + } + } + + fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { + error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler"); + } + + fn connection_keep_alive(&self) -> KeepAlive { + if self.substream.is_some() { + KeepAlive::Yes + } else { + KeepAlive::No + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent + > { + // Flush the events queue if necessary. 
+ if !self.events_queue.is_empty() { + let event = self.events_queue.remove(0); + return Poll::Ready(event) + } + + match self.substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { + None | Some(Poll::Pending) => {}, + Some(Poll::Ready(Some(Ok(msg)))) => + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg))), + Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => { + self.substream = None; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); + }, + } + + Poll::Pending + } +} + +impl fmt::Debug for NotifsInHandler { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("NotifsInHandler") + .field("substream_open", &self.substream.is_some()) + .finish() + } +} diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs new file mode 100644 index 0000000000000000000000000000000000000000..8c64491d997171df73606c2852765c10c9f3f21b --- /dev/null +++ b/client/network/src/protocol/generic_proto/handler/notif_out.rs @@ -0,0 +1,395 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Implementations of the `IntoProtocolsHandler` and `ProtocolsHandler` traits for outgoing +//! substreams of a single gossiping protocol. +//! +//! > **Note**: Each instance corresponds to a single protocol. In order to support multiple +//! > protocols, you need to create multiple instances and group them. +//! + +use crate::protocol::generic_proto::upgrade::{NotificationsOut, NotificationsOutSubstream, NotificationsHandshakeError}; +use futures::prelude::*; +use libp2p::core::{ConnectedPoint, PeerId}; +use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::swarm::{ + ProtocolsHandler, ProtocolsHandlerEvent, + IntoProtocolsHandler, + KeepAlive, + ProtocolsHandlerUpgrErr, + SubstreamProtocol, + NegotiatedSubstream, +}; +use log::error; +use smallvec::SmallVec; +use std::{borrow::Cow, fmt, mem, pin::Pin, task::{Context, Poll}, time::Duration}; +use wasm_timer::Instant; + +/// Maximum duration to open a substream and receive the handshake message. After that, we +/// consider that we failed to open the substream. +const OPEN_TIMEOUT: Duration = Duration::from_secs(10); +/// After successfully establishing a connection with the remote, we keep the connection open for +/// at least this amount of time in order to give the rest of the code the chance to notify us to +/// open substreams. +const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); + +/// Implements the `IntoProtocolsHandler` trait of libp2p. +/// +/// Every time a connection with a remote starts, an instance of this struct is created and +/// sent to a background task dedicated to this connection. Once the connection is established, +/// it is turned into a [`NotifsOutHandler`]. 
+/// +/// See the documentation of [`NotifsOutHandler`] for more information. +pub struct NotifsOutHandlerProto { + /// Name of the protocol to negotiate. + protocol_name: Cow<'static, [u8]>, +} + +impl NotifsOutHandlerProto { + /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the + /// notifications substream. + pub fn new(protocol_name: impl Into>) -> Self { + NotifsOutHandlerProto { + protocol_name: protocol_name.into(), + } + } +} + +impl IntoProtocolsHandler for NotifsOutHandlerProto { + type Handler = NotifsOutHandler; + + fn inbound_protocol(&self) -> DeniedUpgrade { + DeniedUpgrade + } + + fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { + NotifsOutHandler { + protocol_name: self.protocol_name, + when_connection_open: Instant::now(), + state: State::Disabled, + events_queue: SmallVec::new(), + } + } +} + +/// Handler for an outbound notification substream. +/// +/// When a connection is established, this handler starts in the "disabled" state, meaning that +/// no substream will be open. +/// +/// One can try open a substream by sending an [`NotifsOutHandlerIn::Enable`] message to the +/// handler. Once done, the handler will try to establish then maintain an outbound substream with +/// the remote for the purpose of sending notifications to it. +pub struct NotifsOutHandler { + /// Name of the protocol to negotiate. + protocol_name: Cow<'static, [u8]>, + + /// Relationship with the node we're connected to. + state: State, + + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, + + /// Queue of events to send to the outside. + /// + /// This queue must only ever be modified to insert elements at the back, or remove the first + /// element. + events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, +} + +/// Our relationship with the node we're connected to. +enum State { + /// The handler is disabled and idle. No substream is open. + Disabled, + + /// The handler is disabled. A substream is still open and needs to be closed. + /// + /// > **Important**: Having this state means that `poll_close` has been called at least once, + /// > but the `Sink` API is unclear about whether or not the stream can then + /// > be recovered. Because of that, we must never switch from the + /// > `DisabledOpen` state to the `Open` state while keeping the same substream. + DisabledOpen(NotificationsOutSubstream), + + /// The handler is disabled but we are still trying to open a substream with the remote. + /// + /// If the handler gets enabled again, we can immediately switch to `Opening`. + DisabledOpening, + + /// The handler is enabled and we are trying to open a substream with the remote. + Opening { + /// The initial message that we sent. Necessary if we need to re-open a substream. + initial_message: Vec, + }, + + /// The handler is enabled. We have tried opening a substream in the past but the remote + /// refused it. + Refused, + + /// The handler is enabled and substream is open. + Open { + /// Substream that is currently open. + substream: NotificationsOutSubstream, + /// The initial message that we sent. Necessary if we need to re-open a substream. + initial_message: Vec, + }, + + /// Poisoned state. Shouldn't be found in the wild. + Poisoned, +} + +/// Event that can be received by a `NotifsOutHandler`. +#[derive(Debug)] +pub enum NotifsOutHandlerIn { + /// Enables the notifications substream for this node. The handler will try to maintain a + /// substream with the remote. 
+ Enable { + /// Initial message to send to remote nodes when we open substreams. + initial_message: Vec, + }, + + /// Disables the notifications substream for this node. This is the default state. + Disable, + + /// Sends a message on the notifications substream. Ignored if the substream isn't open. + /// + /// It is only valid to send this if the notifications substream has been enabled. + Send(Vec), +} + +/// Event that can be emitted by a `NotifsOutHandler`. +#[derive(Debug)] +pub enum NotifsOutHandlerOut { + /// The notifications substream has been accepted by the remote. + Open { + /// Handshake message sent by the remote after we opened the substream. + handshake: Vec, + }, + + /// The notifications substream has been closed by the remote. + Closed, + + /// We tried to open a notifications substream, but the remote refused it. + /// + /// Can only happen if we're in a closed state. + Refused, +} + +impl NotifsOutHandler { + /// Returns true if the substream is currently open. + pub fn is_open(&self) -> bool { + match &self.state { + State::Disabled => false, + State::DisabledOpening => false, + State::DisabledOpen(_) => true, + State::Opening { .. } => false, + State::Refused => false, + State::Open { .. } => true, + State::Poisoned => false, + } + } + + /// Returns the name of the protocol that we negotiate. + pub fn protocol_name(&self) -> &[u8] { + &self.protocol_name + } +} + +impl ProtocolsHandler for NotifsOutHandler { + type InEvent = NotifsOutHandlerIn; + type OutEvent = NotifsOutHandlerOut; + type Error = void::Void; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = NotificationsOut; + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade) + } + + fn inject_fully_negotiated_inbound( + &mut self, + proto: >::Output + ) { + // We should never reach here. `proto` is a `Void`. + void::unreachable(proto) + } + + fn inject_fully_negotiated_outbound( + &mut self, + (handshake_msg, substream): >::Output, + _: () + ) { + match mem::replace(&mut self.state, State::Poisoned) { + State::Opening { initial_message } => { + let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(ev)); + self.state = State::Open { substream, initial_message }; + }, + // If the handler was disabled while we were negotiating the protocol, immediately + // close it. + State::DisabledOpening => self.state = State::DisabledOpen(substream), + + // Any other situation should never happen. + State::Disabled | State::Refused | State::Open { .. 
} | State::DisabledOpen(_) => + error!("State mismatch in notifications handler: substream already open"), + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + } + + fn inject_event(&mut self, message: NotifsOutHandlerIn) { + match message { + NotifsOutHandlerIn::Enable { initial_message } => { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled => { + let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); + self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), + info: (), + }); + self.state = State::Opening { initial_message }; + }, + State::DisabledOpening => self.state = State::Opening { initial_message }, + State::DisabledOpen(mut sub) => { + // As documented above, in this state we have already called `poll_close` + // once on the substream, and it is unclear whether the substream can then + // be recovered. When in doubt, let's drop the existing substream and + // open a new one. + if sub.close().now_or_never().is_none() { + log::warn!( + target: "sub-libp2p", + "Improperly closed outbound notifications substream" + ); + } + + let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); + self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), + info: (), + }); + self.state = State::Opening { initial_message }; + }, + State::Opening { .. } | State::Refused | State::Open { .. } => + error!("Tried to enable notifications handler that was already enabled"), + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + } + + NotifsOutHandlerIn::Disable => { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => + error!("Tried to disable notifications handler that was already disabled"), + State::Opening { .. } => self.state = State::DisabledOpening, + State::Refused => self.state = State::Disabled, + State::Open { substream, .. } => self.state = State::DisabledOpen(substream), + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + } + + NotifsOutHandlerIn::Send(msg) => + if let State::Open { substream, .. } = &mut self.state { + if let Some(Ok(_)) = substream.send(msg).now_or_never() { + } else { + log::warn!( + target: "sub-libp2p", + "Failed to push message to queue, dropped it" + ); + } + } else { + // This is an API misuse. + log::warn!( + target: "sub-libp2p", + "Tried to send a notification on a disabled handler" + ); + }, + } + } + + fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled => {}, + State::DisabledOpen(_) | State::Refused | State::Open { .. } => + error!("State mismatch in NotificationsOut"), + State::Opening { .. } => { + self.state = State::Refused; + let ev = NotifsOutHandlerOut::Refused; + self.events_queue.push(ProtocolsHandlerEvent::Custom(ev)); + }, + State::DisabledOpening => self.state = State::Disabled, + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + match self.state { + // We have a small grace period of `INITIAL_KEEPALIVE_TIME` during which we keep the + // connection open no matter what, in order to avoid closing and reopening + // connections all the time. 
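For orientation before the keep-alive policy that follows: the outbound handler in this file is driven purely through the `NotifsOutHandlerIn` events shown above. A minimal, hypothetical sequence; the payloads and the helper function are invented for the sketch and are not part of the diff:

```rust
// Illustrative command sequence. `Enable` asks the handler to open and then
// maintain the outbound substream, using the given handshake whenever a
// substream is negotiated. `Send` only has an effect once
// `NotifsOutHandlerOut::Open` has been reported back; otherwise the handler
// logs a warning and drops the message.
fn example_commands() -> Vec<NotifsOutHandlerIn> {
    vec![
        NotifsOutHandlerIn::Enable { initial_message: b"handshake payload".to_vec() },
        NotifsOutHandlerIn::Send(b"a notification".to_vec()),
    ]
}
```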
+ State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => + KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), + State::Opening { .. } | State::Open { .. } => KeepAlive::Yes, + State::Refused | State::Poisoned => KeepAlive::No, + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll> { + // Flush the events queue if necessary. + if !self.events_queue.is_empty() { + let event = self.events_queue.remove(0); + return Poll::Ready(event); + } + + match &mut self.state { + State::Open { substream, initial_message } => + match Sink::poll_flush(Pin::new(substream), cx) { + Poll::Pending | Poll::Ready(Ok(())) => {}, + Poll::Ready(Err(_)) => { + // We try to re-open a substream. + let initial_message = mem::replace(initial_message, Vec::new()); + self.state = State::Opening { initial_message: initial_message.clone() }; + let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message); + self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), + info: (), + }); + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); + } + }, + + State::DisabledOpen(sub) => match Sink::poll_close(Pin::new(sub), cx) { + Poll::Pending => {}, + Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => { + self.state = State::Disabled; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); + }, + }, + + _ => {} + } + + Poll::Pending + } +} + +impl fmt::Debug for NotifsOutHandler { + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("NotifsOutHandler") + .field("open", &self.is_open()) + .finish() + } +} diff --git a/client/network/src/protocol/legacy_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs similarity index 86% rename from client/network/src/protocol/legacy_proto/tests.rs rename to client/network/src/protocol/generic_proto/tests.rs index 89b0854d9081f82c80e8f0dab8da0e138ebb4fca..b8436e2c7f704bf2c4d5664c6908fce96c80d65c 100644 --- a/client/network/src/protocol/legacy_proto/tests.rs +++ b/client/network/src/protocol/generic_proto/tests.rs @@ -25,8 +25,9 @@ use libp2p::swarm::{PollParameters, NetworkBehaviour, NetworkBehaviourAction}; use libp2p::{PeerId, Multiaddr, Transport}; use rand::seq::SliceRandom; use std::{error, io, task::Context, task::Poll, time::Duration}; -use crate::message::Message; -use crate::protocol::legacy_proto::{LegacyProto, LegacyProtoOut}; +use std::collections::HashSet; +use crate::message::{generic::BlockResponse, Message}; +use crate::protocol::generic_proto::{GenericProto, GenericProtoOut}; use sp_test_primitives::Block; /// Builds two nodes that have each other as bootstrap nodes. @@ -81,7 +82,7 @@ fn build_nodes() -> (Swarm, Swarm) { }); let behaviour = CustomProtoWithAddr { - inner: LegacyProto::new(&b"test"[..], &[1], peerset), + inner: GenericProto::new(&b"test"[..], &[1], peerset), addrs: addrs .iter() .enumerate() @@ -111,12 +112,12 @@ fn build_nodes() -> (Swarm, Swarm) { /// Wraps around the `CustomBehaviour` network behaviour, and adds hardcoded node addresses to it. 
struct CustomProtoWithAddr { - inner: LegacyProto, + inner: GenericProto, addrs: Vec<(PeerId, Multiaddr)>, } impl std::ops::Deref for CustomProtoWithAddr { - type Target = LegacyProto; + type Target = GenericProto; fn deref(&self) -> &Self::Target { &self.inner @@ -130,8 +131,8 @@ impl std::ops::DerefMut for CustomProtoWithAddr { } impl NetworkBehaviour for CustomProtoWithAddr { - type ProtocolsHandler = ::ProtocolsHandler; - type OutEvent = ::OutEvent; + type ProtocolsHandler = ::ProtocolsHandler; + type OutEvent = ::OutEvent; fn new_handler(&mut self) -> Self::ProtocolsHandler { self.inner.new_handler() @@ -223,11 +224,14 @@ fn two_nodes_transfer_lots_of_packets() { let fut1 = future::poll_fn(move |cx| -> Poll<()> { loop { match ready!(service1.poll_next_unpin(cx)) { - Some(LegacyProtoOut::CustomProtocolOpen { peer_id, .. }) => { + Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { for n in 0 .. NUM_PACKETS { service1.send_packet( &peer_id, - Message::::ChainSpecific(vec![(n % 256) as u8]).encode() + Message::::BlockResponse(BlockResponse { + id: n as _, + blocks: Vec::new(), + }).encode() ); } }, @@ -240,11 +244,11 @@ fn two_nodes_transfer_lots_of_packets() { let fut2 = future::poll_fn(move |cx| { loop { match ready!(service2.poll_next_unpin(cx)) { - Some(LegacyProtoOut::CustomProtocolOpen { .. }) => {}, - Some(LegacyProtoOut::CustomMessage { message, .. }) => { + Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, + Some(GenericProtoOut::CustomMessage { message, .. }) => { match Message::::decode(&mut &message[..]).unwrap() { - Message::::ChainSpecific(message) => { - assert_eq!(message.len(), 1); + Message::::BlockResponse(BlockResponse { id: _, blocks }) => { + assert!(blocks.is_empty()); packet_counter += 1; if packet_counter == NUM_PACKETS { return Poll::Ready(()) @@ -270,9 +274,21 @@ fn basic_two_nodes_requests_in_parallel() { // Generate random messages with or without a request id. let mut to_send = { let mut to_send = Vec::new(); + let mut existing_ids = HashSet::new(); for _ in 0..200 { // Note: don't make that number too high or the CPU usage will explode. - let msg = (0..10).map(|_| rand::random::()).collect::>(); - to_send.push(Message::::ChainSpecific(msg)); + let req_id = loop { + let req_id = rand::random::(); + + // ensure uniqueness - odds of randomly sampling collisions + // is unlikely, but possible to cause spurious test failures. + if existing_ids.insert(req_id) { + break req_id; + } + }; + + to_send.push(Message::::BlockResponse( + BlockResponse { id: req_id, blocks: Vec::new() } + )); } to_send }; @@ -285,7 +301,7 @@ fn basic_two_nodes_requests_in_parallel() { let fut1 = future::poll_fn(move |cx| -> Poll<()> { loop { match ready!(service1.poll_next_unpin(cx)) { - Some(LegacyProtoOut::CustomProtocolOpen { peer_id, .. }) => { + Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { for msg in to_send.drain(..) { service1.send_packet(&peer_id, msg.encode()); } @@ -298,8 +314,8 @@ fn basic_two_nodes_requests_in_parallel() { let fut2 = future::poll_fn(move |cx| { loop { match ready!(service2.poll_next_unpin(cx)) { - Some(LegacyProtoOut::CustomProtocolOpen { .. }) => {}, - Some(LegacyProtoOut::CustomMessage { message, .. }) => { + Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, + Some(GenericProtoOut::CustomMessage { message, .. 
}) => { let pos = to_receive.iter().position(|m| m.encode() == message).unwrap(); to_receive.remove(pos); if to_receive.is_empty() { @@ -335,7 +351,7 @@ fn reconnect_after_disconnect() { let mut service1_not_ready = false; match service1.poll_next_unpin(cx) { - Poll::Ready(Some(LegacyProtoOut::CustomProtocolOpen { .. })) => { + Poll::Ready(Some(GenericProtoOut::CustomProtocolOpen { .. })) => { match service1_state { ServiceState::NotConnected => { service1_state = ServiceState::FirstConnec; @@ -347,7 +363,7 @@ fn reconnect_after_disconnect() { ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), } }, - Poll::Ready(Some(LegacyProtoOut::CustomProtocolClosed { .. })) => { + Poll::Ready(Some(GenericProtoOut::CustomProtocolClosed { .. })) => { match service1_state { ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, ServiceState::ConnectedAgain| ServiceState::NotConnected | @@ -359,7 +375,7 @@ fn reconnect_after_disconnect() { } match service2.poll_next_unpin(cx) { - Poll::Ready(Some(LegacyProtoOut::CustomProtocolOpen { .. })) => { + Poll::Ready(Some(GenericProtoOut::CustomProtocolOpen { .. })) => { match service2_state { ServiceState::NotConnected => { service2_state = ServiceState::FirstConnec; @@ -371,7 +387,7 @@ fn reconnect_after_disconnect() { ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), } }, - Poll::Ready(Some(LegacyProtoOut::CustomProtocolClosed { .. })) => { + Poll::Ready(Some(GenericProtoOut::CustomProtocolClosed { .. })) => { match service2_state { ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, ServiceState::ConnectedAgain| ServiceState::NotConnected | diff --git a/client/network/src/protocol/generic_proto/upgrade.rs b/client/network/src/protocol/generic_proto/upgrade.rs new file mode 100644 index 0000000000000000000000000000000000000000..36f826336532619479b94876776c65e8636f8c57 --- /dev/null +++ b/client/network/src/protocol/generic_proto/upgrade.rs @@ -0,0 +1,35 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +pub use self::collec::UpgradeCollec; +pub use self::legacy::{ + RegisteredProtocol, + RegisteredProtocolEvent, + RegisteredProtocolName, + RegisteredProtocolSubstream +}; +pub use self::notifications::{ + NotificationsIn, + NotificationsInSubstream, + NotificationsOut, + NotificationsOutSubstream, + NotificationsHandshakeError, + NotificationsOutError, +}; + +mod collec; +mod legacy; +mod notifications; diff --git a/client/network/src/protocol/generic_proto/upgrade/collec.rs b/client/network/src/protocol/generic_proto/upgrade/collec.rs new file mode 100644 index 0000000000000000000000000000000000000000..f8d199974940fb1abe6d41829d728243dd73af23 --- /dev/null +++ b/client/network/src/protocol/generic_proto/upgrade/collec.rs @@ -0,0 +1,97 @@ +// Copyright 2018-2020 Parity Technologies (UK) Ltd. 
+// +// Permission is hereby granted, free of charge, to any person obtaining a +// copy of this software and associated documentation files (the "Software"), +// to deal in the Software without restriction, including without limitation +// the rights to use, copy, modify, merge, publish, distribute, sublicense, +// and/or sell copies of the Software, and to permit persons to whom the +// Software is furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +// DEALINGS IN THE SOFTWARE. + +use futures::prelude::*; +use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; +use std::{iter::FromIterator, pin::Pin, task::{Context, Poll}, vec}; + +// TODO: move this to libp2p => https://github.com/libp2p/rust-libp2p/issues/1445 + +/// Upgrade that combines multiple upgrades of the same type into one. Supports all the protocols +/// supported by either sub-upgrade. +#[derive(Debug, Clone)] +pub struct UpgradeCollec(pub Vec); + +impl From> for UpgradeCollec { + fn from(list: Vec) -> Self { + UpgradeCollec(list) + } +} + +impl FromIterator for UpgradeCollec { + fn from_iter>(iter: I) -> Self { + UpgradeCollec(iter.into_iter().collect()) + } +} + +impl UpgradeInfo for UpgradeCollec { + type Info = ProtoNameWithUsize; + type InfoIter = vec::IntoIter; + + fn protocol_info(&self) -> Self::InfoIter { + self.0.iter().enumerate() + .flat_map(|(n, p)| + p.protocol_info().into_iter().map(move |i| ProtoNameWithUsize(i, n))) + .collect::>() + .into_iter() + } +} + +impl InboundUpgrade for UpgradeCollec +where + T: InboundUpgrade, +{ + type Output = (T::Output, usize); + type Error = (T::Error, usize); + type Future = FutWithUsize; + + fn upgrade_inbound(mut self, sock: C, info: Self::Info) -> Self::Future { + let fut = self.0.remove(info.1).upgrade_inbound(sock, info.0); + FutWithUsize(fut, info.1) + } +} + +/// Groups a `ProtocolName` with a `usize`. +#[derive(Debug, Clone)] +pub struct ProtoNameWithUsize(T, usize); + +impl ProtocolName for ProtoNameWithUsize { + fn protocol_name(&self) -> &[u8] { + self.0.protocol_name() + } +} + +/// Equivalent to `fut.map_ok(|v| (v, num)).map_err(|e| (e, num))`, where `fut` and `num` are +/// the two fields of this struct. 
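The `UpgradeCollec` introduced above merges several upgrades of the same type into one and tags each negotiated protocol with the index of the sub-upgrade it came from. A minimal, hypothetical construction, assuming the `NotificationsIn` upgrade (added later in this diff) is in scope; the protocol names are placeholders:

```rust
// Combine one inbound notifications upgrade per protocol. The `usize` returned
// alongside the upgrade output identifies which entry was negotiated.
fn combined_inbound_upgrade() -> UpgradeCollec<NotificationsIn> {
    vec![
        NotificationsIn::new(&b"/example/announces/1"[..]),
        NotificationsIn::new(&b"/example/transactions/1"[..]),
    ].into()
}
```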
+#[pin_project::pin_project] +pub struct FutWithUsize(#[pin] T, usize); + +impl>, O, E> Future for FutWithUsize { + type Output = Result<(O, usize), (E, usize)>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = self.project(); + match Future::poll(this.0, cx) { + Poll::Ready(Ok(v)) => Poll::Ready(Ok((v, *this.1))), + Poll::Ready(Err(e)) => Poll::Ready(Err((e, *this.1))), + Poll::Pending => Poll::Pending, + } + } +} diff --git a/client/network/src/protocol/legacy_proto/upgrade.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs similarity index 100% rename from client/network/src/protocol/legacy_proto/upgrade.rs rename to client/network/src/protocol/generic_proto/upgrade/legacy.rs diff --git a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/generic_proto/upgrade/notifications.rs new file mode 100644 index 0000000000000000000000000000000000000000..ddc07b5d6f3d6b2ffefe8fec47cee78768a1495a --- /dev/null +++ b/client/network/src/protocol/generic_proto/upgrade/notifications.rs @@ -0,0 +1,622 @@ +// Copyright 2019-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +/// Notifications protocol. +/// +/// The Substrate notifications protocol consists in the following: +/// +/// - Node A opens a substream to node B and sends a message which contains some protocol-specific +/// higher-level logic. This message is prefixed with a variable-length integer message length. +/// This message can be empty, in which case `0` is sent. +/// - If node B accepts the substream, it sends back a message with the same properties. +/// Afterwards, the sending side of B is closed. +/// - If instead B refuses the connection (which typically happens because no empty slot is +/// available), then it immediately closes the substream without sending back anything. +/// - Node A can then send notifications to B, prefixed with a variable-length integer indicating +/// the length of the message. +/// - Node A closes its writing side if it doesn't want the notifications substream anymore. +/// +/// Notification substreams are unidirectional. If A opens a substream with B, then B is +/// encouraged but not required to open a substream to A as well. +/// + +use bytes::BytesMut; +use futures::{prelude::*, ready}; +use futures_codec::Framed; +use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; +use log::error; +use std::{borrow::Cow, collections::VecDeque, io, iter, mem, pin::Pin, task::{Context, Poll}}; +use unsigned_varint::codec::UviBytes; + +/// Maximum allowed size of the two handshake messages, in bytes. +const MAX_HANDSHAKE_SIZE: usize = 1024; +/// Maximum number of buffered messages before we consider the remote unresponsive and kill the +/// substream. 
+const MAX_PENDING_MESSAGES: usize = 256; + +/// Upgrade that accepts a substream, sends back a status message, then becomes a unidirectional +/// stream of messages. +#[derive(Debug, Clone)] +pub struct NotificationsIn { + /// Protocol name to use when negotiating the substream. + protocol_name: Cow<'static, [u8]>, +} + +/// Upgrade that opens a substream, waits for the remote to accept by sending back a status +/// message, then becomes a unidirectional sink of data. +#[derive(Debug, Clone)] +pub struct NotificationsOut { + /// Protocol name to use when negotiating the substream. + protocol_name: Cow<'static, [u8]>, + /// Message to send when we start the handshake. + initial_message: Vec, +} + +/// A substream for incoming notification messages. +/// +/// When creating, this struct starts in a state in which we must first send back a handshake +/// message to the remote. No message will come before this has been done. +#[pin_project::pin_project] +pub struct NotificationsInSubstream { + #[pin] + socket: Framed>>>, + handshake: NotificationsInSubstreamHandshake, +} + +/// State of the handshake sending back process. +enum NotificationsInSubstreamHandshake { + /// Waiting for the user to give us the handshake message. + NotSent, + /// User gave us the handshake message. Trying to push it in the socket. + PendingSend(Vec), + /// Handshake message was pushed in the socket. Still need to flush. + Close, + /// Handshake message successfully sent. + Sent, +} + +/// A substream for outgoing notification messages. +#[pin_project::pin_project] +pub struct NotificationsOutSubstream { + /// Substream where to send messages. + #[pin] + socket: Framed>>>, + /// Queue of messages waiting to be sent. + messages_queue: VecDeque>, + /// If true, we need to flush `socket`. + need_flush: bool, +} + +impl NotificationsIn { + /// Builds a new potential upgrade. + pub fn new(protocol_name: impl Into>) -> Self { + NotificationsIn { + protocol_name: protocol_name.into(), + } + } + + /// Returns the name of the protocol that we accept. + pub fn protocol_name(&self) -> &[u8] { + &self.protocol_name + } +} + +impl UpgradeInfo for NotificationsIn { + type Info = Cow<'static, [u8]>; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol_name.clone()) + } +} + +impl InboundUpgrade for NotificationsIn +where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +{ + type Output = (Vec, NotificationsInSubstream); + type Future = Pin> + Send>>; + type Error = NotificationsHandshakeError; + + fn upgrade_inbound( + self, + mut socket: TSubstream, + _: Self::Info, + ) -> Self::Future { + Box::pin(async move { + let initial_message_len = unsigned_varint::aio::read_usize(&mut socket).await?; + if initial_message_len > MAX_HANDSHAKE_SIZE { + return Err(NotificationsHandshakeError::TooLarge { + requested: initial_message_len, + max: MAX_HANDSHAKE_SIZE, + }); + } + + let mut initial_message = vec![0u8; initial_message_len]; + if !initial_message.is_empty() { + socket.read(&mut initial_message).await?; + } + + let substream = NotificationsInSubstream { + socket: Framed::new(socket, UviBytes::default()), + handshake: NotificationsInSubstreamHandshake::NotSent, + }; + + Ok((initial_message, substream)) + }) + } +} + +impl NotificationsInSubstream +where TSubstream: AsyncRead + AsyncWrite, +{ + /// Sends the handshake in order to inform the remote that we accept the substream. 
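As a reference for the wire format described in the module documentation above (and read back by `upgrade_inbound`/`upgrade_outbound` through `unsigned_varint`), here is a minimal sketch of the handshake framing. It uses a hand-rolled LEB128 encoder instead of the `unsigned_varint` crate so that it stays self-contained; it is an illustration, not code from this diff:

```rust
/// Frames a handshake the way the notifications protocol expects: an
/// unsigned-varint (LEB128) length prefix followed by the payload. An empty
/// handshake is therefore encoded as the single byte `0x00`.
fn frame_handshake(handshake: &[u8]) -> Vec<u8> {
    let mut out = Vec::with_capacity(handshake.len() + 5);
    let mut len = handshake.len();
    loop {
        let mut byte = (len & 0x7f) as u8;
        len >>= 7;
        if len != 0 {
            byte |= 0x80; // more length bytes follow
        }
        out.push(byte);
        if len == 0 {
            break;
        }
    }
    out.extend_from_slice(handshake);
    out
}

#[test]
fn empty_handshake_is_a_single_zero_byte() {
    assert_eq!(frame_handshake(&[]), vec![0]);
}
```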
+ pub fn send_handshake(&mut self, message: impl Into>) { + match self.handshake { + NotificationsInSubstreamHandshake::NotSent => {} + _ => { + error!(target: "sub-libp2p", "Tried to send handshake twice"); + return; + } + } + + self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); + } +} + +impl Stream for NotificationsInSubstream +where TSubstream: AsyncRead + AsyncWrite + Unpin, +{ + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + + // This `Stream` implementation first tries to send back the handshake if necessary. + loop { + match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { + NotificationsInSubstreamHandshake::Sent => + return Stream::poll_next(this.socket.as_mut(), cx), + NotificationsInSubstreamHandshake::NotSent => + return Poll::Pending, + NotificationsInSubstreamHandshake::PendingSend(msg) => + match Sink::poll_ready(this.socket.as_mut(), cx) { + Poll::Ready(_) => { + *this.handshake = NotificationsInSubstreamHandshake::Close; + match Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg)) { + Ok(()) => {}, + Err(err) => return Poll::Ready(Some(Err(err))), + } + }, + Poll::Pending => + *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg), + }, + NotificationsInSubstreamHandshake::Close => + match Sink::poll_close(this.socket.as_mut(), cx)? { + Poll::Ready(()) => + *this.handshake = NotificationsInSubstreamHandshake::Sent, + Poll::Pending => + *this.handshake = NotificationsInSubstreamHandshake::Close, + }, + } + } + } +} + +impl NotificationsOut { + /// Builds a new potential upgrade. + pub fn new(protocol_name: impl Into>, initial_message: impl Into>) -> Self { + let initial_message = initial_message.into(); + if initial_message.len() > MAX_HANDSHAKE_SIZE { + error!(target: "sub-libp2p", "Outbound networking handshake is above allowed protocol limit"); + } + + NotificationsOut { + protocol_name: protocol_name.into(), + initial_message, + } + } +} + +impl UpgradeInfo for NotificationsOut { + type Info = Cow<'static, [u8]>; + type InfoIter = iter::Once; + + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol_name.clone()) + } +} + +impl OutboundUpgrade for NotificationsOut +where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +{ + type Output = (Vec, NotificationsOutSubstream); + type Future = Pin> + Send>>; + type Error = NotificationsHandshakeError; + + fn upgrade_outbound( + self, + mut socket: TSubstream, + _: Self::Info, + ) -> Self::Future { + Box::pin(async move { + upgrade::write_with_len_prefix(&mut socket, &self.initial_message).await?; + + // Reading handshake. 
+ let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; + if handshake_len > MAX_HANDSHAKE_SIZE { + return Err(NotificationsHandshakeError::TooLarge { + requested: handshake_len, + max: MAX_HANDSHAKE_SIZE, + }); + } + + let mut handshake = vec![0u8; handshake_len]; + if !handshake.is_empty() { + socket.read(&mut handshake).await?; + } + + Ok((handshake, NotificationsOutSubstream { + socket: Framed::new(socket, UviBytes::default()), + messages_queue: VecDeque::with_capacity(MAX_PENDING_MESSAGES), + need_flush: false, + })) + }) + } +} + +impl Sink> for NotificationsOutSubstream + where TSubstream: AsyncRead + AsyncWrite + Unpin, +{ + type Error = NotificationsOutError; + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(mut self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { + if self.messages_queue.len() >= MAX_PENDING_MESSAGES { + return Err(NotificationsOutError::Clogged); + } + + self.messages_queue.push_back(item); + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + + while !this.messages_queue.is_empty() { + match Sink::poll_ready(this.socket.as_mut(), cx) { + Poll::Ready(Err(err)) => return Poll::Ready(Err(From::from(err))), + Poll::Ready(Ok(())) => { + let msg = this.messages_queue.pop_front() + .expect("checked for !is_empty above; qed"); + Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg))?; + *this.need_flush = true; + }, + Poll::Pending => return Poll::Pending, + } + } + + if *this.need_flush { + match Sink::poll_flush(this.socket.as_mut(), cx) { + Poll::Ready(Err(err)) => return Poll::Ready(Err(From::from(err))), + Poll::Ready(Ok(())) => *this.need_flush = false, + Poll::Pending => return Poll::Pending, + } + } + + Poll::Ready(Ok(())) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(Sink::poll_flush(self.as_mut(), cx))?; + let this = self.project(); + match Sink::poll_close(this.socket, cx) { + Poll::Ready(Ok(())) => Poll::Ready(Ok(())), + Poll::Ready(Err(err)) => Poll::Ready(Err(From::from(err))), + Poll::Pending => Poll::Pending, + } + } +} + +/// Error generated by sending on a notifications out substream. +#[derive(Debug, derive_more::From, derive_more::Display)] +pub enum NotificationsHandshakeError { + /// I/O error on the substream. + Io(io::Error), + + /// Initial message or handshake was too large. + #[display(fmt = "Initial message or handshake was too large: {}", requested)] + TooLarge { + /// Size requested by the remote. + requested: usize, + /// Maximum allowed, + max: usize, + }, + + /// Error while decoding the variable-length integer. + VarintDecode(unsigned_varint::decode::Error), +} + +impl From for NotificationsHandshakeError { + fn from(err: unsigned_varint::io::ReadError) -> Self { + match err { + unsigned_varint::io::ReadError::Io(err) => NotificationsHandshakeError::Io(err), + unsigned_varint::io::ReadError::Decode(err) => NotificationsHandshakeError::VarintDecode(err), + _ => { + log::warn!("Unrecognized varint decoding error"); + NotificationsHandshakeError::Io(From::from(io::ErrorKind::InvalidData)) + } + } + } +} + +/// Error generated by sending on a notifications out substream. +#[derive(Debug, derive_more::From, derive_more::Display)] +pub enum NotificationsOutError { + /// I/O error on the substream. + Io(io::Error), + + /// Remote doesn't process our messages quickly enough. 
+ /// + /// > **Note**: This is not necessarily the remote's fault, and could also be caused by the + /// > local node sending data too quickly. Properly doing back-pressure, however, + /// > would require a deep refactoring effort in Substrate as a whole. + Clogged, +} + +#[cfg(test)] +mod tests { + use super::{NotificationsIn, NotificationsOut}; + + use async_std::net::{TcpListener, TcpStream}; + use futures::{prelude::*, channel::oneshot}; + use libp2p::core::upgrade; + use std::pin::Pin; + + #[test] + fn basic_works() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let (handshake, mut substream) = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), + upgrade::Version::V1 + ).await.unwrap(); + + assert_eq!(handshake, b"hello world"); + substream.send(b"test message".to_vec()).await.unwrap(); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await.unwrap(); + + assert_eq!(initial_message, b"initial message"); + substream.send_handshake(&b"hello world"[..]); + + let msg = substream.next().await.unwrap().unwrap(); + assert_eq!(msg.as_ref(), b"test message"); + }); + + async_std::task::block_on(client); + } + + #[test] + fn empty_handshake() { + // Check that everything still works when the handshake messages are empty. + + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let (handshake, mut substream) = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, vec![]), + upgrade::Version::V1 + ).await.unwrap(); + + assert!(handshake.is_empty()); + substream.send(Default::default()).await.unwrap(); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await.unwrap(); + + assert!(initial_message.is_empty()); + substream.send_handshake(vec![]); + + let msg = substream.next().await.unwrap().unwrap(); + assert!(msg.as_ref().is_empty()); + }); + + async_std::task::block_on(client); + } + + #[test] + fn refused() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let outcome = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, &b"hello"[..]), + upgrade::Version::V1 + ).await; + + // Despite the protocol negotiation being successfully conducted on the listener + // side, we have to receive an error here because the listener didn't send the + // handshake. 
+ assert!(outcome.is_err()); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_msg, substream) = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await.unwrap(); + + assert_eq!(initial_msg, b"hello"); + + // We successfully upgrade to the protocol, but then close the substream. + drop(substream); + }); + + async_std::task::block_on(client); + } + + #[test] + fn large_initial_message_refused() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let ret = upgrade::apply_outbound( + socket, + // We check that an initial message that is too large gets refused. + NotificationsOut::new(PROTO_NAME, (0..32768).map(|_| 0).collect::>()), + upgrade::Version::V1 + ).await; + assert!(ret.is_err()); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let ret = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await; + assert!(ret.is_err()); + }); + + async_std::task::block_on(client); + } + + #[test] + fn large_handshake_refused() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let ret = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), + upgrade::Version::V1 + ).await; + assert!(ret.is_err()); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await.unwrap(); + assert_eq!(initial_message, b"initial message"); + + // We check that a handshake that is too large gets refused. + substream.send_handshake((0..32768).map(|_| 0).collect::>()); + let _ = substream.next().await; + }); + + async_std::task::block_on(client); + } + + #[test] + fn buffer_is_full_closes_connection() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); + let (handshake, mut substream) = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, vec![]), + upgrade::Version::V1 + ).await.unwrap(); + + assert!(handshake.is_empty()); + + // Push an item and flush so that the test works. + substream.send(b"hello world".to_vec()).await.unwrap(); + + for _ in 0..32768 { + // Push an item on the sink without flushing until an error happens because the + // buffer is full. 
+ let message = b"hello world!".to_vec(); + if future::poll_fn(|cx| Sink::poll_ready(Pin::new(&mut substream), cx)).await.is_err() { + return Ok(()); + } + if Sink::start_send(Pin::new(&mut substream), message).is_err() { + return Ok(()); + } + } + + Err(()) + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = upgrade::apply_inbound( + socket, + NotificationsIn::new(PROTO_NAME) + ).await.unwrap(); + + assert!(initial_message.is_empty()); + substream.send_handshake(vec![]); + + // Process one message so that the handshake and all works. + let _ = substream.next().await.unwrap().unwrap(); + + client.await.unwrap(); + }); + } +} diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 77cf71408d6c49f78511cb6c6936820eaa126c1e..a141e134fca051a40c67bdf84c4662edcda60ae5 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -127,7 +127,7 @@ impl Config { let mut v = Vec::new(); v.extend_from_slice(b"/"); v.extend_from_slice(id.as_bytes()); - v.extend_from_slice(b"/light/1"); + v.extend_from_slice(b"/light/2"); self.protocol = v.into(); self } @@ -350,7 +350,7 @@ where , response: api::v1::light::Response ) -> Result, Error> { - log::trace!("response {} from {}", response.id, peer); + log::trace!("response from {}", peer); use api::v1::light::response::Response; match response.response { Some(Response::RemoteCallResponse(response)) => @@ -419,12 +419,10 @@ where fn on_remote_call_request ( &mut self , peer: &PeerId - , request_id: u64 , request: &api::v1::light::RemoteCallRequest ) -> Result { - log::trace!("remote call request {} from {} ({} at {:?})", - request_id, + log::trace!("remote call request from {} ({} at {:?})", peer, request.method, request.block); @@ -434,8 +432,7 @@ where let proof = match self.chain.execution_proof(&block, &request.method, &request.data) { Ok((_, proof)) => proof, Err(e) => { - log::trace!("remote call request {} from {} ({} at {:?}) failed with: {}", - request_id, + log::trace!("remote call request from {} ({} at {:?}) failed with: {}", peer, request.method, request.block, @@ -449,13 +446,12 @@ where api::v1::light::response::Response::RemoteCallResponse(r) }; - Ok(api::v1::light::Response { id: request_id, response: Some(response) }) + Ok(api::v1::light::Response { response: Some(response) }) } fn on_remote_read_request ( &mut self , peer: &PeerId - , request_id: u64 , request: &api::v1::light::RemoteReadRequest ) -> Result { @@ -464,19 +460,17 @@ where return Err(Error::BadRequest("remote read request without keys")) } - log::trace!("remote read request {} from {} ({} at {:?})", - request_id, + log::trace!("remote read request from {} ({} at {:?})", peer, fmt_keys(request.keys.first(), request.keys.last()), request.block); let block = Decode::decode(&mut request.block.as_ref())?; - let proof = match self.chain.read_proof(&block, &request.keys) { + let proof = match self.chain.read_proof(&block, &mut request.keys.iter().map(AsRef::as_ref)) { Ok(proof) => proof, Err(error) => { - log::trace!("remote read request {} from {} ({} at {:?}) failed with: {}", - request_id, + log::trace!("remote read request from {} ({} at {:?}) failed with: {}", peer, fmt_keys(request.keys.first(), request.keys.last()), 
request.block, @@ -490,13 +484,12 @@ where api::v1::light::response::Response::RemoteReadResponse(r) }; - Ok(api::v1::light::Response { id: request_id, response: Some(response) }) + Ok(api::v1::light::Response { response: Some(response) }) } fn on_remote_read_child_request ( &mut self , peer: &PeerId - , request_id: u64 , request: &api::v1::light::RemoteReadChildRequest ) -> Result { @@ -505,8 +498,7 @@ where return Err(Error::BadRequest("remove read child request without keys")) } - log::trace!("remote read child request {} from {} ({} {} at {:?})", - request_id, + log::trace!("remote read child request from {} ({} {} at {:?})", peer, request.storage_key.to_hex::(), fmt_keys(request.keys.first(), request.keys.last()), @@ -516,11 +508,15 @@ where let proof = if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.chain.read_child_proof(&block, &request.storage_key, info, &request.keys) { + match self.chain.read_child_proof( + &block, + &request.storage_key, + info, + &mut request.keys.iter().map(AsRef::as_ref) + ) { Ok(proof) => proof, Err(error) => { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, + log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", peer, request.storage_key.to_hex::(), fmt_keys(request.keys.first(), request.keys.last()), @@ -530,8 +526,7 @@ where } } } else { - log::trace!("remote read child request {} from {} ({} {} at {:?}) failed with: {}", - request_id, + log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", peer, request.storage_key.to_hex::(), fmt_keys(request.keys.first(), request.keys.last()), @@ -546,25 +541,23 @@ where api::v1::light::response::Response::RemoteReadResponse(r) }; - Ok(api::v1::light::Response { id: request_id, response: Some(response) }) + Ok(api::v1::light::Response { response: Some(response) }) } fn on_remote_header_request ( &mut self , peer: &PeerId - , request_id: u64 , request: &api::v1::light::RemoteHeaderRequest ) -> Result { - log::trace!("remote header proof request {} from {} ({:?})", request_id, peer, request.block); + log::trace!("remote header proof request from {} ({:?})", peer, request.block); let block = Decode::decode(&mut request.block.as_ref())?; let (header, proof) = match self.chain.header_proof(block) { Ok((header, proof)) => (header.encode(), proof), Err(error) => { - log::trace!("remote header proof request {} from {} ({:?}) failed with: {}", - request_id, + log::trace!("remote header proof request from {} ({:?}) failed with: {}", peer, request.block, error); @@ -577,18 +570,16 @@ where api::v1::light::response::Response::RemoteHeaderResponse(r) }; - Ok(api::v1::light::Response { id: request_id, response: Some(response) }) + Ok(api::v1::light::Response { response: Some(response) }) } fn on_remote_changes_request ( &mut self , peer: &PeerId - , request_id: u64 , request: &api::v1::light::RemoteChangesRequest ) -> Result { - log::trace!("remote changes proof request {} from {} for key {} ({:?}..{:?})", - request_id, + log::trace!("remote changes proof request from {} for key {} ({:?}..{:?})", peer, if !request.storage_key.is_empty() { format!("{} : {}", request.storage_key.to_hex::(), request.key.to_hex::()) @@ -613,8 +604,7 @@ where let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key.as_ref(), &key) { Ok(proof) => proof, Err(error) => { - log::trace!("remote changes proof request {} from {} for key {} ({:?}..{:?}) failed with: {}", 
- request_id, + log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", peer, if let Some(sk) = storage_key { format!("{} : {}", sk.0.to_hex::(), key.0.to_hex::()) @@ -646,7 +636,7 @@ where api::v1::light::response::Response::RemoteChangesResponse(r) }; - Ok(api::v1::light::Response { id: request_id, response: Some(response) }) + Ok(api::v1::light::Response { response: Some(response) }) } } @@ -697,29 +687,29 @@ where match event { // An incoming request from remote has been received. Event::Request(request, mut stream) => { - log::trace!("incoming request {} from {}", peer, request.id); + log::trace!("incoming request from {}", peer); let result = match &request.request { Some(api::v1::light::request::Request::RemoteCallRequest(r)) => - self.on_remote_call_request(&peer, request.id, r), + self.on_remote_call_request(&peer, r), Some(api::v1::light::request::Request::RemoteReadRequest(r)) => - self.on_remote_read_request(&peer, request.id, r), + self.on_remote_read_request(&peer, r), Some(api::v1::light::request::Request::RemoteHeaderRequest(r)) => - self.on_remote_header_request(&peer, request.id, r), + self.on_remote_header_request(&peer, r), Some(api::v1::light::request::Request::RemoteReadChildRequest(r)) => - self.on_remote_read_child_request(&peer, request.id, r), + self.on_remote_read_child_request(&peer, r), Some(api::v1::light::request::Request::RemoteChangesRequest(r)) => - self.on_remote_changes_request(&peer, request.id, r), + self.on_remote_changes_request(&peer, r), None => { - log::debug!("ignoring request {} without request data from peer {}", request.id, peer); + log::debug!("ignoring request without request data from peer {}", peer); return } }; match result { Ok(response) => { - log::trace!("enqueueing response {} for peer {}", response.id, peer); + log::trace!("enqueueing response for peer {}", peer); let mut data = Vec::new(); if let Err(e) = response.encode(&mut data) { - log::debug!("error encoding response {} for peer {}: {}", response.id, peer, e) + log::debug!("error encoding response for peer {}: {}", peer, e) } else { let future = async move { if let Err(e) = write_one(&mut stream, data).await { @@ -733,16 +723,15 @@ where self.remove_peer(&peer); self.peerset.report_peer(peer, ReputationChange::new(-(1 << 12), "bad request")) } - Err(e) => log::debug!("error handling request {} from peer {}: {}", request.id, peer, e) + Err(e) => log::debug!("error handling request from peer {}: {}", peer, e) } } // A response to one of our own requests has been received. - Event::Response(response) => { - let id = response.id; + Event::Response(id, response) => { if let Some(request) = self.outstanding.remove(&id) { // We first just check if the response originates from the expected peer. 
if request.peer != peer { - log::debug!("was expecting response {} from {} instead of {}", id, request.peer, peer); + log::debug!("was expecting response from {} instead of {}", request.peer, peer); self.outstanding.insert(id, request); self.remove_peer(&peer); self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); @@ -836,16 +825,17 @@ where } }; if let Some(peer) = available_peer { - let id = self.next_request_id(); - let rq = serialize_request(id, &request.request); + let rq = serialize_request(&request.request); let mut buf = Vec::with_capacity(rq.encoded_len()); if let Err(e) = rq.encode(&mut buf) { - log::debug!("failed to serialize request {}: {}", id, e); + log::debug!("failed to serialize request: {}", e); send_reply(Err(ClientError::RemoteFetchFailed), request.request) } else { + let id = self.next_request_id(); log::trace!("sending request {} to peer {}", id, peer); let protocol = OutboundProtocol { request: buf, + request_id: id, max_data_size: self.config.max_data_size, protocol: self.config.protocol.clone(), }; @@ -918,7 +908,7 @@ fn retries(request: &Request) -> usize { rc.unwrap_or(0) } -fn serialize_request(id: u64, request: &Request) -> api::v1::light::Request { +fn serialize_request(request: &Request) -> api::v1::light::Request { let request = match request { Request::Header { request, .. } => { let r = api::v1::light::RemoteHeaderRequest { block: request.block.encode() }; @@ -962,7 +952,7 @@ fn serialize_request(id: u64, request: &Request) -> api::v1::light: } }; - api::v1::light::Request { id, request: Some(request) } + api::v1::light::Request { request: Some(request) } } fn send_reply(result: Result, ClientError>, request: Request) { @@ -1004,7 +994,7 @@ pub enum Event { /// Incoming request from remote and substream to use for the response. Request(api::v1::light::Request, T), /// Incoming response from remote. - Response(api::v1::light::Response), + Response(u64, api::v1::light::Response), } /// Substream upgrade protocol. @@ -1054,6 +1044,8 @@ where pub struct OutboundProtocol { /// The serialized protobuf request. request: Vec, + /// Local identifier for the request. Used to associate it with a response. + request_id: u64, /// The max. request length in bytes. max_data_size: usize, /// The protocol to use for upgrade negotiation. 
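The hunks above move request identifiers out of the wire format: the protobuf-level `id` disappears, `OutboundProtocol` now carries a local `request_id`, and decoded responses are surfaced as `Event::Response(request_id, response)` so the behaviour can match them against its `outstanding` map. The following is only a minimal sketch of that pairing idea, using hypothetical `PendingRequest`/`LightResponse` types rather than the real handler:

```rust
// Sketch of local request-id tracking, assuming hypothetical request/response
// types. The real handler stores the id in `OutboundProtocol` and emits it
// together with the decoded response instead of trusting an id on the wire.
use std::collections::HashMap;

#[derive(Debug)]
struct PendingRequest { retries: usize }

#[derive(Debug)]
struct LightResponse { proof: Vec<u8> }

struct Handler {
    next_request_id: u64,
    outstanding: HashMap<u64, PendingRequest>,
}

impl Handler {
    fn send_request(&mut self, request: PendingRequest) -> u64 {
        // The id never leaves the local node; it only tags the outbound
        // substream whose upgrade will eventually yield the response.
        let id = self.next_request_id;
        self.next_request_id += 1;
        self.outstanding.insert(id, request);
        id
    }

    fn on_response(&mut self, id: u64, response: LightResponse) {
        match self.outstanding.remove(&id) {
            Some(_request) => println!("response for request {}: {:?}", id, response),
            None => println!("unsolicited response tagged with id {}", id),
        }
    }
}

fn main() {
    let mut handler = Handler { next_request_id: 0, outstanding: HashMap::new() };
    let id = handler.send_request(PendingRequest { retries: 1 });
    handler.on_response(id, LightResponse { proof: Vec::new() });
}
```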
@@ -1082,7 +1074,7 @@ where write_one(&mut s, &self.request).await?; let vec = read_one(&mut s, self.max_data_size).await?; api::v1::light::Response::decode(&vec[..]) - .map(Event::Response) + .map(|r| Event::Response(self.request_id, r)) .map_err(|e| { ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) }) @@ -1308,53 +1300,6 @@ mod tests { assert_eq!(0, behaviour.outstanding.len()); } - #[test] - fn disconnects_from_peer_on_response_with_wrong_id() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - behaviour.inject_connected(peer.clone(), empty_dialer()); - assert_eq!(1, behaviour.peers.len()); - - let chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - poll(&mut behaviour); // Make progress - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - // Construct response with bogus ID - let response = { - let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; - api::v1::light::Response { - id: 2365789, - response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - - // Make sure our bogus ID is really not used. - assert!(!behaviour.outstanding.keys().any(|id| id == &response.id)); - - behaviour.inject_node_event(peer.clone(), Event::Response(response)); - assert!(behaviour.peers.is_empty()); - - poll(&mut behaviour); // More progress - - // The request should be back in the pending queue - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - #[test] fn disconnects_from_peer_on_incorrect_response() { let peer = PeerId::random(); @@ -1386,12 +1331,11 @@ mod tests { let response = { let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; api::v1::light::Response { - id: request_id, response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), } }; - behaviour.inject_node_event(peer.clone(), Event::Response(response)); + behaviour.inject_node_event(peer.clone(), Event::Response(request_id, response)); assert!(behaviour.peers.is_empty()); poll(&mut behaviour); // More progress @@ -1416,12 +1360,11 @@ mod tests { let response = { let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; api::v1::light::Response { - id: 2347895932, response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), } }; - behaviour.inject_node_event(peer.clone(), Event::Response(response)); + behaviour.inject_node_event(peer.clone(), Event::Response(2347895932, response)); assert!(behaviour.peers.is_empty()); poll(&mut behaviour); @@ -1459,12 +1402,11 @@ mod tests { let response = { let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse! 
api::v1::light::Response { - id: request_id, response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), } }; - behaviour.inject_node_event(peer.clone(), Event::Response(response)); + behaviour.inject_node_event(peer.clone(), Event::Response(request_id, response)); assert!(behaviour.peers.is_empty()); poll(&mut behaviour); // More progress @@ -1513,11 +1455,10 @@ mod tests { let response = { let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; api::v1::light::Response { - id: request_id, response: Some(api::v1::light::response::Response::RemoteCallResponse(r)) } }; - behaviour.inject_node_event(responding_peer, Event::Response(response.clone())); + behaviour.inject_node_event(responding_peer, Event::Response(request_id, response.clone())); assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::SendEvent { .. })); assert_matches!(chan.1.try_recv(), Ok(None)) } @@ -1527,11 +1468,10 @@ mod tests { let response = { let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; api::v1::light::Response { - id: request_id, response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), } }; - behaviour.inject_node_event(responding_peer, Event::Response(response)); + behaviour.inject_node_event(responding_peer, Event::Response(request_id, response)); assert_matches!(poll(&mut behaviour), Poll::Pending); assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) } @@ -1551,28 +1491,24 @@ mod tests { proof: empty_proof() }; api::v1::light::Response { - id: 1, response: Some(api::v1::light::response::Response::RemoteHeaderResponse(r)), } } Request::Read{..} => { let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; api::v1::light::Response { - id: 1, response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), } } Request::ReadChild{..} => { let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; api::v1::light::Response { - id: 1, response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), } } Request::Call{..} => { let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; api::v1::light::Response { - id: 1, response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), } } @@ -1584,7 +1520,6 @@ mod tests { roots_proof: empty_proof() }; api::v1::light::Response { - id: 1, response: Some(api::v1::light::response::Response::RemoteChangesResponse(r)), } } @@ -1599,7 +1534,7 @@ mod tests { assert_eq!(1, behaviour.outstanding.len()); assert_eq!(1, *behaviour.outstanding.keys().next().unwrap()); - behaviour.inject_node_event(peer.clone(), Event::Response(response)); + behaviour.inject_node_event(peer.clone(), Event::Response(1, response)); poll(&mut behaviour); diff --git a/client/network/src/protocol/light_dispatch.rs b/client/network/src/protocol/light_dispatch.rs index 738847dbaf3cf2229503e53855f2d18318ffad5b..aff220b6e03a6cdf41389d881e62f73497cea003 100644 --- a/client/network/src/protocol/light_dispatch.rs +++ b/client/network/src/protocol/light_dispatch.rs @@ -30,7 +30,7 @@ use sp_blockchain::Error as ClientError; use sc_client_api::{FetchChecker, RemoteHeaderRequest, RemoteCallRequest, RemoteReadRequest, RemoteChangesRequest, ChangesProof, RemoteReadChildRequest, RemoteBodyRequest, StorageProof}; -use crate::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; +use crate::protocol::message::{self, BlockAttributes, Direction, FromBlock, RequestId}; use libp2p::PeerId; use crate::config::Roles; use 
sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index a2261b2059110af0049e520a093f0acf262512c1..a12c26da2e47eec14ae136ede05e6094d9e0c715 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -219,9 +219,6 @@ pub mod generic { FinalityProofResponse(FinalityProofResponse), /// Batch of consensus protocol messages. ConsensusBatch(Vec), - /// Chain-specific message. - #[codec(index = "255")] - ChainSpecific(Vec), } impl Message { @@ -246,7 +243,6 @@ pub mod generic { Message::FinalityProofRequest(_) => "FinalityProofRequest", Message::FinalityProofResponse(_) => "FinalityProofResponse", Message::ConsensusBatch(_) => "ConsensusBatch", - Message::ChainSpecific(_) => "ChainSpecific", } } } diff --git a/client/network/src/protocol/schema/api.v1.proto b/client/network/src/protocol/schema/api.v1.proto index 73128c53de46635a1af74cd78c6f19087028af7a..ccbf49d666115ea50fbe08885c9242c891d805a1 100644 --- a/client/network/src/protocol/schema/api.v1.proto +++ b/client/network/src/protocol/schema/api.v1.proto @@ -14,31 +14,27 @@ enum Direction { // Request block data from a peer. message BlockRequest { - // Unique request id. - uint64 id = 1; // Bits of block data to request. - uint32 fields = 2; + uint32 fields = 1; // Start from this block. oneof from_block { // Start with given hash. - bytes hash = 3; + bytes hash = 2; // Start with given block number. - bytes number = 4; + bytes number = 3; } // End at this block. An implementation defined maximum is used when unspecified. - bytes to_block = 5; // optional + bytes to_block = 4; // optional // Sequence direction. - Direction direction = 6; + Direction direction = 5; // Maximum number of blocks to return. An implementation defined maximum is used when unspecified. - uint32 max_blocks = 7; // optional + uint32 max_blocks = 6; // optional } // Response to `BlockRequest` message BlockResponse { - // Id of a request this response was made for. - uint64 id = 1; // Block data for the requested sequence. - repeated BlockData blocks = 2; + repeated BlockData blocks = 1; } // Block data sent in the response. diff --git a/client/network/src/protocol/schema/light.v1.proto b/client/network/src/protocol/schema/light.v1.proto index b9aee67b5ee24ff6614e3db2a0c711988a8d03c1..1c98d49730cf98dcafb9999056682f6c40efb101 100644 --- a/client/network/src/protocol/schema/light.v1.proto +++ b/client/network/src/protocol/schema/light.v1.proto @@ -14,26 +14,22 @@ message Pair { // Enumerate all possible light client request messages. message Request { - // Unique request id. - uint64 id = 1; oneof request { - RemoteCallRequest remote_call_request = 2; - RemoteReadRequest remote_read_request = 3; - RemoteHeaderRequest remote_header_request = 4; - RemoteReadChildRequest remote_read_child_request = 5; - RemoteChangesRequest remote_changes_request = 6; + RemoteCallRequest remote_call_request = 1; + RemoteReadRequest remote_read_request = 2; + RemoteHeaderRequest remote_header_request = 3; + RemoteReadChildRequest remote_read_child_request = 4; + RemoteChangesRequest remote_changes_request = 5; } } // Enumerate all possible light client response messages. message Response { - /// Id of a request this response was made for. 
- uint64 id = 1; oneof response { - RemoteCallResponse remote_call_response = 2; - RemoteReadResponse remote_read_response = 3; - RemoteHeaderResponse remote_header_response = 4; - RemoteChangesResponse remote_changes_response = 6; + RemoteCallResponse remote_call_response = 1; + RemoteReadResponse remote_read_response = 2; + RemoteHeaderResponse remote_header_response = 3; + RemoteChangesResponse remote_changes_response = 4; } } diff --git a/client/network/src/protocol/specialization.rs b/client/network/src/protocol/specialization.rs deleted file mode 100644 index 6ffa335c8c330fb1689acf7176c8442dd31d4e8a..0000000000000000000000000000000000000000 --- a/client/network/src/protocol/specialization.rs +++ /dev/null @@ -1,171 +0,0 @@ -// Copyright 2017-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! Specializations of the substrate network protocol to allow more complex forms of communication. - -pub use crate::protocol::event::{DhtEvent, Event}; - -use crate::protocol::Context; -use libp2p::PeerId; -use sp_runtime::traits::Block as BlockT; - -/// A specialization of the substrate network protocol. Handles events and sends messages. -pub trait NetworkSpecialization: Send + Sync + 'static { - /// Get the current specialization-status. - fn status(&self) -> Vec; - - /// Called when a peer successfully handshakes. - fn on_connect(&mut self, ctx: &mut dyn Context, who: PeerId, status: crate::message::Status); - - /// Called when a peer is disconnected. If the peer ID is unknown, it should be ignored. - fn on_disconnect(&mut self, ctx: &mut dyn Context, who: PeerId); - - /// Called when a network-specific message arrives. - fn on_message( - &mut self, - ctx: &mut dyn Context, - who: PeerId, - message: Vec - ); - - /// Called periodically to maintain peers and handle timeouts. - fn maintain_peers(&mut self, _ctx: &mut dyn Context) { } - - /// Called when a block is _imported_ at the head of the chain (not during major sync). - /// Not guaranteed to be called for every block, but will be most of the after major sync. - fn on_block_imported(&mut self, _ctx: &mut dyn Context, _hash: B::Hash, _header: &B::Header) { } -} - -/// A specialization that does nothing. -#[derive(Clone)] -pub struct DummySpecialization; - -impl NetworkSpecialization for DummySpecialization { - fn status(&self) -> Vec { - vec![] - } - - fn on_connect( - &mut self, - _ctx: &mut dyn Context, - _peer_id: PeerId, - _status: crate::message::Status - ) {} - - fn on_disconnect(&mut self, _ctx: &mut dyn Context, _peer_id: PeerId) {} - - fn on_message( - &mut self, - _ctx: &mut dyn Context, - _peer_id: PeerId, - _message: Vec, - ) {} -} - -/// Construct a simple protocol that is composed of several sub protocols. -/// Each "sub protocol" needs to implement `Specialization` and needs to provide a `new()` function. -/// For more fine grained implementations, this macro is not usable. 
-/// -/// # Example -/// -/// ```nocompile -/// construct_simple_protocol! { -/// pub struct MyProtocol where Block = MyBlock { -/// consensus_gossip: ConsensusGossip, -/// other_protocol: MyCoolStuff, -/// } -/// } -/// ``` -/// -/// You can also provide an optional parameter after `where Block = MyBlock`, so it looks like -/// `where Block = MyBlock, Status = consensus_gossip`. This will instruct the implementation to -/// use the `status()` function from the `ConsensusGossip` protocol. By default, `status()` returns -/// an empty vector. -#[macro_export] -macro_rules! construct_simple_protocol { - ( - $( #[ $attr:meta ] )* - pub struct $protocol:ident where - Block = $block:ident - $( , Status = $status_protocol_name:ident )* - { - $( $sub_protocol_name:ident : $sub_protocol:ident $( <$protocol_block:ty> )*, )* - } - ) => { - $( #[$attr] )* - pub struct $protocol { - $( $sub_protocol_name: $sub_protocol $( <$protocol_block> )*, )* - } - - impl $protocol { - /// Instantiate a node protocol handler. - pub fn new() -> Self { - Self { - $( $sub_protocol_name: $sub_protocol::new(), )* - } - } - } - - impl $crate::specialization::NetworkSpecialization<$block> for $protocol { - fn status(&self) -> Vec { - $( - let status = self.$status_protocol_name.status(); - - if !status.is_empty() { - return status; - } - )* - - Vec::new() - } - - fn on_connect( - &mut self, - _ctx: &mut $crate::Context<$block>, - _who: $crate::PeerId, - _status: $crate::StatusMessage<$block> - ) { - $( self.$sub_protocol_name.on_connect(_ctx, _who, _status); )* - } - - fn on_disconnect(&mut self, _ctx: &mut $crate::Context<$block>, _who: $crate::PeerId) { - $( self.$sub_protocol_name.on_disconnect(_ctx, _who); )* - } - - fn on_message( - &mut self, - _ctx: &mut $crate::Context<$block>, - _who: $crate::PeerId, - _message: Vec, - ) { - $( self.$sub_protocol_name.on_message(_ctx, _who, _message); )* - } - - fn maintain_peers(&mut self, _ctx: &mut $crate::Context<$block>) { - $( self.$sub_protocol_name.maintain_peers(_ctx); )* - } - - fn on_block_imported( - &mut self, - _ctx: &mut $crate::Context<$block>, - _hash: <$block as $crate::BlockT>::Hash, - _header: &<$block as $crate::BlockT>::Header - ) { - $( self.$sub_protocol_name.on_block_imported(_ctx, _hash, _header); )* - } - } - } -} diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index 88e663c904bb1175704593db5427442c4a2ee7b8..04afc5d918471514ac2ece465f305531273f6452 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -35,7 +35,7 @@ use sp_consensus::{BlockOrigin, BlockStatus, }; use crate::{ config::{Roles, BoxFinalityProofRequestBuilder}, - message::{self, generic::FinalityProofRequest, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, + protocol::message::{self, generic::FinalityProofRequest, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, FinalityProofResponse}, }; use either::Either; @@ -751,7 +751,7 @@ impl ChainSync { | PeerSyncState::DownloadingFinalityProof(..) => Vec::new() } } else { - // When request.is_none() just accept blocks + // When request.is_none() this is a block announcement. Just accept blocks. blocks.into_iter().map(|b| { IncomingBlock { hash: b.hash, @@ -1167,8 +1167,7 @@ impl ChainSync { } /// Restart the sync process. 
- fn restart<'a>(&'a mut self) -> impl Iterator), BadPeer>> + 'a - { + fn restart<'a>(&'a mut self) -> impl Iterator), BadPeer>> + 'a { self.queue_blocks.clear(); self.blocks.clear(); let info = self.client.info(); @@ -1203,6 +1202,27 @@ impl ChainSync { fn is_already_downloading(&self, hash: &B::Hash) -> bool { self.peers.iter().any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) } + + /// Return some key metrics. + pub(crate) fn metrics(&self) -> Metrics { + use std::convert::TryInto; + Metrics { + queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), + fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), + finality_proofs: self.extra_finality_proofs.metrics(), + justifications: self.extra_justifications.metrics(), + _priv: () + } + } +} + +#[derive(Debug)] +pub(crate) struct Metrics { + pub(crate) queued_blocks: u32, + pub(crate) fork_targets: u32, + pub(crate) finality_proofs: extra_requests::Metrics, + pub(crate) justifications: extra_requests::Metrics, + _priv: () } /// Request the ancestry for a block. Sends a request for header and justification for the given diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index 974935f765e6c09a9c390b2b5aa17e5813bb9fed..31b798ace28a54b207df5d43e625fb2d0c9170f2 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::mem; use std::cmp; use std::ops::Range; use std::collections::{HashMap, BTreeMap}; @@ -22,7 +21,7 @@ use std::collections::hash_map::Entry; use log::trace; use libp2p::PeerId; use sp_runtime::traits::{Block as BlockT, NumberFor, One}; -use crate::message; +use crate::protocol::message; /// Block data with origin. 
#[derive(Debug, Clone, PartialEq, Eq)] @@ -104,8 +103,7 @@ impl BlockCollection { common: NumberFor, max_parallel: u32, max_ahead: u32, - ) -> Option>> - { + ) -> Option>> { if peer_best <= common { // Bail out early return None; @@ -165,20 +163,20 @@ impl BlockCollection { pub fn drain(&mut self, from: NumberFor) -> Vec> { let mut drained = Vec::new(); let mut ranges = Vec::new(); - { - let mut prev = from; - for (start, range_data) in &mut self.blocks { - match range_data { - &mut BlockRangeState::Complete(ref mut blocks) if *start <= prev => { - prev = *start + (blocks.len() as u32).into(); - let mut blocks = mem::replace(blocks, Vec::new()); - drained.append(&mut blocks); - ranges.push(*start); - }, - _ => break, - } + + let mut prev = from; + for (start, range_data) in &mut self.blocks { + match range_data { + &mut BlockRangeState::Complete(ref mut blocks) if *start <= prev => { + prev = *start + (blocks.len() as u32).into(); + // Remove all elements from `blocks` and add them to `drained` + drained.append(blocks); + ranges.push(*start); + }, + _ => break, } } + for r in ranges { self.blocks.remove(&r); } diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index 38c250cddf26d920d9e2f2e2e08eed19ac53a3e4..81b12a1a704aab9b17737c4a03190bef519ff3c5 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -53,6 +53,15 @@ pub(crate) struct ExtraRequests { request_type_name: &'static str, } +#[derive(Debug)] +pub(crate) struct Metrics { + pub(crate) pending_requests: u32, + pub(crate) active_requests: u32, + pub(crate) importing_requests: u32, + pub(crate) failed_requests: u32, + _priv: () +} + impl ExtraRequests { pub(crate) fn new(request_type_name: &'static str) -> Self { ExtraRequests { @@ -240,6 +249,18 @@ impl ExtraRequests { pub(crate) fn pending_requests(&self) -> impl Iterator> { self.pending_requests.iter() } + + /// Get some key metrics. + pub(crate) fn metrics(&self) -> Metrics { + use std::convert::TryInto; + Metrics { + pending_requests: self.pending_requests.len().try_into().unwrap_or(std::u32::MAX), + active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX), + failed_requests: self.failed_requests.len().try_into().unwrap_or(std::u32::MAX), + importing_requests: self.importing_requests.len().try_into().unwrap_or(std::u32::MAX), + _priv: () + } + } } /// Matches peers with pending extra requests. diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 8d990282981bb795ce65db10ef636de943847156..a220a009f3fafb075b8b406c2d0ecc4cd82f192d 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -25,7 +25,7 @@ //! The methods of the [`NetworkService`] are implemented by sending a message over a channel, //! which is then processed by [`NetworkWorker::poll`]. 
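The rewritten `BlockCollection::drain` in `blocks.rs` above relies on `Vec::append` moving every element of a completed range into `drained` and leaving the source vector empty, which is why the old `mem::replace(blocks, Vec::new())` and the `std::mem` import could be dropped. A small, hedged illustration of that standard-library behaviour:

```rust
fn main() {
    let mut blocks = vec![1u32, 2, 3];
    let mut drained: Vec<u32> = Vec::new();

    // `append` moves all elements of `blocks` into `drained`, leaving `blocks`
    // empty -- the same effect as the previous `mem::replace` dance, but
    // without allocating a throwaway Vec.
    drained.append(&mut blocks);

    assert_eq!(drained, vec![1, 2, 3]);
    assert!(blocks.is_empty());
}
```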
-use std::{collections::{HashMap, HashSet}, fs, marker::PhantomData, io, path::Path}; +use std::{borrow::Cow, collections::{HashMap, HashSet}, fs, marker::PhantomData, io, path::Path, str}; use std::sync::{Arc, atomic::{AtomicBool, AtomicUsize, Ordering}}; use std::pin::Pin; use std::task::Poll; @@ -39,15 +39,15 @@ use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent}; use parking_lot::Mutex; use sc_peerset::PeersetHandle; use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId}; +use prometheus_endpoint::{Registry, Counter, CounterVec, Gauge, GaugeVec, Opts, U64, register, PrometheusError}; use crate::{behaviour::{Behaviour, BehaviourOut}, config::{parse_str_addr, parse_addr}}; -use crate::{NetworkState, NetworkStateNotConnectedPeer, NetworkStatePeer}; use crate::{transport, config::NonReservedPeerMode, ReputationChange}; use crate::config::{Params, TransportConfig}; use crate::error::Error; -use crate::protocol::{self, Protocol, Context, PeerInfo}; +use crate::network_state::{NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer}; +use crate::protocol::{self, Protocol, PeerInfo}; use crate::protocol::{event::Event, light_dispatch::{AlwaysBadChecker, RequestData}}; -use crate::protocol::specialization::NetworkSpecialization; use crate::protocol::sync::SyncState; /// Minimum Requirements for a Hash within Networking @@ -101,7 +101,7 @@ impl ReportHandle { } /// Substrate network service. Handles network IO and manages connectivity. -pub struct NetworkService, H: ExHashT> { +pub struct NetworkService { /// Number of peers we're connected to. num_connected: Arc, /// The local external addresses. @@ -116,19 +116,19 @@ pub struct NetworkService, H: E /// nodes it should be connected to or not. peerset: PeersetHandle, /// Channel that sends messages to the actual worker. - to_worker: mpsc::UnboundedSender>, + to_worker: mpsc::UnboundedSender>, /// Marker to pin the `H` generic. Serves no purpose except to not break backwards /// compatibility. _marker: PhantomData, } -impl, H: ExHashT> NetworkWorker { +impl NetworkWorker { /// Creates the network service. /// /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order /// for the network processing to advance. From it, you can extract a `NetworkService` using /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(params: Params) -> Result, Error> { + pub fn new(params: Params) -> Result, Error> { let (to_worker, from_worker) = mpsc::unbounded(); if let Some(ref path) = params.network_config.net_config_path { @@ -205,17 +205,17 @@ impl, H: ExHashT> NetworkWorker }, params.chain.clone(), checker.clone(), - params.specialization, params.transaction_pool, params.finality_proof_provider.clone(), params.finality_proof_request_builder, params.protocol_id.clone(), peerset_config, - params.block_announce_validator + params.block_announce_validator, + params.metrics_registry.as_ref() )?; // Build the swarm. - let (mut swarm, bandwidth): (Swarm::, _) = { + let (mut swarm, bandwidth): (Swarm::, _) = { let user_agent = format!( "{} ({})", params.network_config.client_version, @@ -263,14 +263,14 @@ impl, H: ExHashT> NetworkWorker // Listen on multiaddresses. 
for addr in ¶ms.network_config.listen_addresses { - if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { + if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) } } // Add external addresses. for addr in ¶ms.network_config.public_addresses { - Swarm::::add_external_address(&mut swarm, addr.clone()); + Swarm::::add_external_address(&mut swarm, addr.clone()); } let external_addresses = Arc::new(Mutex::new(Vec::new())); @@ -296,6 +296,10 @@ impl, H: ExHashT> NetworkWorker from_worker, light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), event_streams: Vec::new(), + metrics: match params.metrics_registry { + Some(registry) => Some(Metrics::register(®istry)?), + None => None + } }) } @@ -351,13 +355,13 @@ impl, H: ExHashT> NetworkWorker /// Return a `NetworkService` that can be shared through the code base and can be used to /// manipulate the worker. - pub fn service(&self) -> &Arc> { + pub fn service(&self) -> &Arc> { &self.service } /// You must call this when a new block is imported by the client. - pub fn on_block_imported(&mut self, hash: B::Hash, header: B::Header, data: Vec, is_best: bool) { - self.network_service.user_protocol_mut().on_block_imported(hash, &header, data, is_best); + pub fn on_block_imported(&mut self, header: B::Header, data: Vec, is_best: bool) { + self.network_service.user_protocol_mut().on_block_imported(&header, data, is_best); } /// You must call this when a new block is finalized by the client. @@ -415,9 +419,9 @@ impl, H: ExHashT> NetworkWorker }; NetworkState { - peer_id: Swarm::::local_peer_id(&swarm).to_base58(), - listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), - external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), + peer_id: Swarm::::local_peer_id(&swarm).to_base58(), + listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), + external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), average_download_per_sec: self.service.bandwidth.average_download_per_sec(), average_upload_per_sec: self.service.bandwidth.average_upload_per_sec(), connected_peers, @@ -446,7 +450,7 @@ impl, H: ExHashT> NetworkWorker } } -impl, H: ExHashT> NetworkService { +impl NetworkService { /// Writes a message on an open notifications channel. Has no effect if the notifications /// channel with this protocol name is closed. /// @@ -490,9 +494,11 @@ impl, H: ExHashT> NetworkServic pub fn register_notifications_protocol( &self, engine_id: ConsensusEngineId, + protocol_name: impl Into>, ) { let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, + protocol_name: protocol_name.into(), }); } @@ -543,15 +549,6 @@ impl, H: ExHashT> NetworkServic .unbounded_send(ServiceToWorkerMsg::RequestJustification(hash.clone(), number)); } - /// Execute a closure with the chain-specific network specialization. - pub fn with_spec(&self, f: F) - where F: FnOnce(&mut S, &mut dyn Context) + Send + 'static - { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::ExecuteWithSpec(Box::new(f))); - } - /// Are we in the process of downloading the chain? 
pub fn is_major_syncing(&self) -> bool { self.is_major_syncing.load(Ordering::Relaxed) @@ -639,8 +636,8 @@ impl, H: ExHashT> NetworkServic } } -impl, H: ExHashT> sp_consensus::SyncOracle - for NetworkService +impl sp_consensus::SyncOracle + for NetworkService { fn is_major_syncing(&mut self) -> bool { NetworkService::is_major_syncing(self) @@ -651,8 +648,8 @@ impl, H: ExHashT> sp_consensus: } } -impl<'a, B: BlockT + 'static, S: NetworkSpecialization, H: ExHashT> sp_consensus::SyncOracle - for &'a NetworkService +impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle + for &'a NetworkService { fn is_major_syncing(&mut self) -> bool { NetworkService::is_major_syncing(self) @@ -672,10 +669,9 @@ pub trait NetworkStateInfo { fn local_peer_id(&self) -> PeerId; } -impl NetworkStateInfo for NetworkService +impl NetworkStateInfo for NetworkService where B: sp_runtime::traits::Block, - S: NetworkSpecialization, H: ExHashT, { /// Returns the local external addresses. @@ -692,12 +688,11 @@ impl NetworkStateInfo for NetworkService /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. -enum ServiceToWorkerMsg> { +enum ServiceToWorkerMsg { PropagateExtrinsic(H), PropagateExtrinsics, RequestJustification(B::Hash, NumberFor), AnnounceBlock(B::Hash, Vec), - ExecuteWithSpec(Box) + Send>), GetValue(record::Key), PutValue(record::Key, Vec), AddKnownAddress(PeerId, Multiaddr), @@ -710,6 +705,7 @@ enum ServiceToWorkerMsg> { }, RegisterNotifProtocol { engine_id: ConsensusEngineId, + protocol_name: Cow<'static, [u8]>, }, DisconnectPeer(PeerId), } @@ -718,7 +714,7 @@ enum ServiceToWorkerMsg> { /// /// You are encouraged to poll this in a separate background thread or task. #[must_use = "The NetworkWorker must be polled in order for the network to work"] -pub struct NetworkWorker, H: ExHashT> { +pub struct NetworkWorker { /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. external_addresses: Arc>>, /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. @@ -726,20 +722,123 @@ pub struct NetworkWorker, H: Ex /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. is_major_syncing: Arc, /// The network service that can be extracted and shared through the codebase. - service: Arc>, + service: Arc>, /// The *actual* network. - network_service: Swarm, + network_service: Swarm, /// The import queue that was passed as initialization. import_queue: Box>, /// Messages from the `NetworkService` and that must be processed. - from_worker: mpsc::UnboundedReceiver>, + from_worker: mpsc::UnboundedReceiver>, /// Receiver for queries from the light client that must be processed. light_client_rqs: Option>>, /// Senders for events that happen on the network. event_streams: Vec>, + /// Prometheus network metrics. 
+ metrics: Option, +} + +struct Metrics { + // This list is ordered alphabetically + connections: Gauge, + import_queue_blocks_submitted: Counter, + import_queue_finality_proofs_submitted: Counter, + import_queue_justifications_submitted: Counter, + is_major_syncing: Gauge, + kbuckets_num_nodes: Gauge, + network_per_sec_bytes: GaugeVec, + notifications_total: CounterVec, + num_event_stream_channels: Gauge, + opened_notification_streams: GaugeVec, + peers_count: Gauge, + peerset_num_discovered: Gauge, + peerset_num_requested: Gauge, + random_kademalia_queries_total: Counter, +} + +impl Metrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + // This list is ordered alphabetically + connections: register(Gauge::new( + "sub_libp2p_connections", "Number of libp2p connections" + )?, registry)?, + import_queue_blocks_submitted: register(Counter::new( + "import_queue_blocks_submitted", + "Number of blocks submitted to the import queue.", + )?, registry)?, + import_queue_finality_proofs_submitted: register(Counter::new( + "import_queue_finality_proofs_submitted", + "Number of finality proofs submitted to the import queue.", + )?, registry)?, + import_queue_justifications_submitted: register(Counter::new( + "import_queue_justifications_submitted", + "Number of justifications submitted to the import queue.", + )?, registry)?, + is_major_syncing: register(Gauge::new( + "sub_libp2p_is_major_syncing", "Whether the node is performing a major sync or not.", + )?, registry)?, + kbuckets_num_nodes: register(Gauge::new( + "sub_libp2p_kbuckets_num_nodes", "Number of nodes in the Kademlia k-buckets" + )?, registry)?, + network_per_sec_bytes: register(GaugeVec::new( + Opts::new( + "sub_libp2p_network_per_sec_bytes", + "Average bandwidth usage per second" + ), + &["direction"] + )?, registry)?, + notifications_total: register(CounterVec::new( + Opts::new( + "sub_libp2p_notifications_total", + "Number of notification received from all nodes" + ), + &["direction", "protocol"] + )?, registry)?, + num_event_stream_channels: register(Gauge::new( + "sub_libp2p_num_event_stream_channels", + "Number of internal active channels that broadcast network events", + )?, registry)?, + opened_notification_streams: register(GaugeVec::new( + Opts::new( + "sub_libp2p_opened_notification_streams", + "Number of open notification substreams" + ), + &["protocol"] + )?, registry)?, + peers_count: register(Gauge::new( + "sub_libp2p_peers_count", "Number of network gossip peers", + )?, registry)?, + peerset_num_discovered: register(Gauge::new( + "sub_libp2p_peerset_num_discovered", "Number of nodes stored in the peerset manager", + )?, registry)?, + peerset_num_requested: register(Gauge::new( + "sub_libp2p_peerset_num_requested", "Number of nodes that the peerset manager wants us to be connected to", + )?, registry)?, + random_kademalia_queries_total: register(Counter::new( + "sub_libp2p_random_kademalia_queries_total", "Number of random Kademlia queries started", + )?, registry)?, + }) + } + + fn update_with_network_event(&self, event: &Event) { + match event { + Event::NotificationStreamOpened { engine_id, .. } => { + self.opened_notification_streams.with_label_values(&[&engine_id_to_string(&engine_id)]).inc(); + }, + Event::NotificationStreamClosed { engine_id, .. } => { + self.opened_notification_streams.with_label_values(&[&engine_id_to_string(&engine_id)]).dec(); + }, + Event::NotificationsReceived { messages, .. 
} => { + for (engine_id, _) in messages { + self.notifications_total.with_label_values(&["in", &engine_id_to_string(&engine_id)]).inc(); + } + }, + _ => {} + } + } } -impl, H: ExHashT> Future for NetworkWorker { +impl Future for NetworkWorker { type Output = Result<(), io::Error>; fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll { @@ -766,11 +865,6 @@ impl, H: ExHashT> Future for Ne }; match msg { - ServiceToWorkerMsg::ExecuteWithSpec(task) => { - let protocol = this.network_service.user_protocol_mut(); - let (mut context, spec) = protocol.specialization_lock(); - task(spec, &mut context); - }, ServiceToWorkerMsg::AnnounceBlock(hash, data) => this.network_service.user_protocol_mut().announce_block(hash, data), ServiceToWorkerMsg::RequestJustification(hash, number) => @@ -789,10 +883,15 @@ impl, H: ExHashT> Future for Ne this.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), - ServiceToWorkerMsg::WriteNotification { message, engine_id, target } => - this.network_service.user_protocol_mut().write_notification(target, engine_id, message), - ServiceToWorkerMsg::RegisterNotifProtocol { engine_id } => { - let events = this.network_service.user_protocol_mut().register_notifications_protocol(engine_id); + ServiceToWorkerMsg::WriteNotification { message, engine_id, target } => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.notifications_total.with_label_values(&["out", &engine_id_to_string(&engine_id)]).inc(); + } + this.network_service.user_protocol_mut().write_notification(target, engine_id, message) + }, + ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, protocol_name } => { + let events = this.network_service.user_protocol_mut() + .register_notifications_protocol(engine_id, protocol_name); for event in events { this.event_streams.retain(|sender| sender.unbounded_send(event.clone()).is_ok()); } @@ -810,18 +909,47 @@ impl, H: ExHashT> Future for Ne match poll_value { Poll::Pending => break, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockImport(origin, blocks))) => - this.import_queue.import_blocks(origin, blocks), - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport(origin, hash, nb, justification))) => - this.import_queue.import_justification(origin, hash, nb, justification), - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::FinalityProofImport(origin, hash, nb, proof))) => - this.import_queue.import_finality_proof(origin, hash, nb, proof), - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Event(ev))) => - this.event_streams.retain(|sender| sender.unbounded_send(ev.clone()).is_ok()), - Poll::Ready(SwarmEvent::Connected(peer_id)) => - trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id), - Poll::Ready(SwarmEvent::Disconnected(peer_id)) => - trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?})", peer_id), + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockImport(origin, blocks))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.import_queue_blocks_submitted.inc(); + } + this.import_queue.import_blocks(origin, blocks); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport(origin, hash, nb, justification))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.import_queue_justifications_submitted.inc(); + } + this.import_queue.import_justification(origin, hash, nb, justification); + }, + 
Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::FinalityProofImport(origin, hash, nb, proof))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.import_queue_finality_proofs_submitted.inc(); + } + this.import_queue.import_finality_proof(origin, hash, nb, proof); + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted)) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.random_kademalia_queries_total.inc(); + } + }, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Event(ev))) => { + this.event_streams.retain(|sender| sender.unbounded_send(ev.clone()).is_ok()); + if let Some(metrics) = this.metrics.as_ref() { + metrics.update_with_network_event(&ev); + } + }, + Poll::Ready(SwarmEvent::Connected(peer_id)) => { + trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); + if let Some(metrics) = this.metrics.as_ref() { + metrics.connections.inc(); + } + }, + Poll::Ready(SwarmEvent::Disconnected(peer_id)) => { + trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?})", peer_id); + if let Some(metrics) = this.metrics.as_ref() { + metrics.connections.dec(); + } + }, Poll::Ready(SwarmEvent::NewListenAddr(addr)) => trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", addr), Poll::Ready(SwarmEvent::ExpiredListenAddr(addr)) => @@ -833,35 +961,58 @@ impl, H: ExHashT> Future for Ne }; } + let num_connected_peers = this.network_service.user_protocol_mut().num_connected_peers(); + // Update the variables shared with the `NetworkService`. - this.num_connected.store(this.network_service.user_protocol_mut().num_connected_peers(), Ordering::Relaxed); + this.num_connected.store(num_connected_peers, Ordering::Relaxed); { - let external_addresses = Swarm::::external_addresses(&this.network_service).cloned().collect(); + let external_addresses = Swarm::::external_addresses(&this.network_service).cloned().collect(); *this.external_addresses.lock() = external_addresses; } - this.is_major_syncing.store(match this.network_service.user_protocol_mut().sync_state() { + + let is_major_syncing = match this.network_service.user_protocol_mut().sync_state() { SyncState::Idle => false, SyncState::Downloading => true, - }, Ordering::Relaxed); + }; + + this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); + + if let Some(metrics) = this.metrics.as_ref() { + metrics.network_per_sec_bytes.with_label_values(&["in"]).set(this.service.bandwidth.average_download_per_sec()); + metrics.network_per_sec_bytes.with_label_values(&["out"]).set(this.service.bandwidth.average_upload_per_sec()); + metrics.is_major_syncing.set(is_major_syncing as u64); + metrics.kbuckets_num_nodes.set(this.network_service.num_kbuckets_entries() as u64); + metrics.num_event_stream_channels.set(this.event_streams.len() as u64); + metrics.peers_count.set(num_connected_peers as u64); + metrics.peerset_num_discovered.set(this.network_service.user_protocol().num_discovered_peers() as u64); + metrics.peerset_num_requested.set(this.network_service.user_protocol().requested_peers().count() as u64); + } Poll::Pending } } -impl, H: ExHashT> Unpin for NetworkWorker { +impl Unpin for NetworkWorker { +} + +/// Turns a `ConsensusEngineId` into a representable string. +fn engine_id_to_string(id: &ConsensusEngineId) -> Cow { + if let Ok(s) = std::str::from_utf8(&id[..]) { + Cow::Borrowed(s) + } else { + Cow::Owned(format!("{:?}", id)) + } } /// The libp2p swarm, customized for our needs. 
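The `engine_id_to_string` helper added above turns a `ConsensusEngineId` (a four-byte array) into a printable label for the Prometheus metrics, falling back to the `Debug` form when the id is not valid UTF-8. A self-contained usage sketch, with a plain `[u8; 4]` standing in for `ConsensusEngineId`:

```rust
// Hedged sketch of the label-formatting fallback; `[u8; 4]` stands in for
// the real `ConsensusEngineId` type alias.
use std::borrow::Cow;

fn engine_id_to_string(id: &[u8; 4]) -> Cow<str> {
    if let Ok(s) = std::str::from_utf8(&id[..]) {
        Cow::Borrowed(s)
    } else {
        Cow::Owned(format!("{:?}", id))
    }
}

fn main() {
    assert_eq!(engine_id_to_string(b"BABE"), "BABE");
    // Non-UTF-8 ids fall back to their Debug form so the metric label stays printable.
    assert_eq!(engine_id_to_string(&[0xff, 0, 1, 2]), "[255, 0, 1, 2]");
}
```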
-type Swarm = libp2p::swarm::Swarm< - Behaviour ->; +type Swarm = libp2p::swarm::Swarm>; // Implementation of `import_queue::Link` trait using the available local variables. -struct NetworkLink<'a, B: BlockT, S: NetworkSpecialization, H: ExHashT> { - protocol: &'a mut Swarm, +struct NetworkLink<'a, B: BlockT, H: ExHashT> { + protocol: &'a mut Swarm, } -impl<'a, B: BlockT, S: NetworkSpecialization, H: ExHashT> Link for NetworkLink<'a, B, S, H> { +impl<'a, B: BlockT, H: ExHashT> Link for NetworkLink<'a, B, H> { fn blocks_processed( &mut self, imported: usize, diff --git a/client/network/test/Cargo.toml b/client/network/test/Cargo.toml index 339279c704828376d460e17f6480005a1849f023..7fec4f4da8b47ccca2df942dadf729e8357583dd 100644 --- a/client/network/test/Cargo.toml +++ b/client/network/test/Cargo.toml @@ -1,30 +1,31 @@ [package] description = "Integration tests for Substrate network protocol" name = "sc-network-test" -version = "0.8.0" +version = "0.8.0-dev" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sc-network = { version = "0.8", path = "../" } +sc-network = { version = "0.8.0-alpha.2", path = "../" } log = "0.4.8" parking_lot = "0.10.0" -futures = "0.1.29" -futures03 = { package = "futures", version = "0.3.1", features = ["compat"] } +futures = "0.3.1" futures-timer = "3.0.1" rand = "0.7.2" -libp2p = { version = "0.16.0", default-features = false, features = ["libp2p-websocket"] } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sc-client = { version = "0.8", path = "../../" } -sc-client-api = { version = "2.0.0", path = "../../api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sc-block-builder = { version = "0.8", path = "../../block-builder" } -sp-consensus-babe = { version = "0.8", path = "../../../primitives/consensus/babe" } +libp2p = { version = "0.16.2", default-features = false, features = ["libp2p-websocket"] } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sc-client = { version = "0.8.0-alpha.2", path = "../../" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../api" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sc-block-builder = { version = "0.8.0-alpha.2", path = "../../block-builder" } +sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/babe" } env_logger = "0.7.0" -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } +substrate-test-runtime = { version = "2.0.0-dev", path = "../../../test-utils/runtime" } tempfile = "3.1.0" -tokio = "0.1.22" diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index c15bd3365503f733801e6c3e3e066e8be1c5149a..0add6c63d5ac974fae8277ea6d44f5a76b6b2604 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ 
-21,11 +21,11 @@ mod block_import; #[cfg(test)] mod sync; -use std::{collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData}; +use std::{collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, task::{Poll, Context as FutureContext}}; use libp2p::build_multiaddr; use log::trace; -use sc_network::FinalityProofProvider; +use sc_network::config::FinalityProofProvider; use sp_blockchain::{ Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, Info as BlockchainInfo, }; @@ -35,7 +35,7 @@ use sc_client_api::{ FinalityNotification, backend::{TransactionFor, AuxStore, Backend, Finalizer}, }; -use sc_block_builder::BlockBuilder; +use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; use sc_client::LongestChain; use sc_network::config::Roles; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; @@ -46,23 +46,19 @@ use sp_consensus::block_import::{BlockImport, ImportResult}; use sp_consensus::Error as ConsensusError; use sp_consensus::{BlockOrigin, ForkChoiceStrategy, BlockImportParams, BlockCheckParams, JustificationImport}; use futures::prelude::*; -use futures03::{Future as _, FutureExt as _, TryFutureExt as _, StreamExt as _, TryStreamExt as _}; use sc_network::{NetworkWorker, NetworkStateInfo, NetworkService, ReportHandle, config::ProtocolId}; use sc_network::config::{NetworkConfiguration, TransportConfig, BoxFinalityProofRequestBuilder}; use libp2p::PeerId; use parking_lot::Mutex; use sp_core::H256; -use sc_network::ProtocolConfig; +use sc_network::config::{ProtocolConfig, TransactionPool}; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use sp_runtime::Justification; -use sc_network::TransactionPool; -use sc_network::specialization::NetworkSpecialization; use substrate_test_runtime_client::{self, AccountKeyring}; pub use substrate_test_runtime_client::runtime::{Block, Extrinsic, Hash, Transfer}; pub use substrate_test_runtime_client::{TestClient, TestClientBuilder, TestClientBuilderExt}; -pub use sc_network::specialization::DummySpecialization; type AuthorityId = sp_consensus_babe::AuthorityId; @@ -178,7 +174,7 @@ impl PeersClient { } } -pub struct Peer> { +pub struct Peer { pub data: D, client: PeersClient, /// We keep a copy of the verifier so that we can invoke it for locally-generated blocks, @@ -189,12 +185,12 @@ pub struct Peer> { block_import: BlockImportAdapter<()>, select_chain: Option>, backend: Option>, - network: NetworkWorker::Hash>, - imported_blocks_stream: Box, Error = ()> + Send>, - finality_notification_stream: Box, Error = ()> + Send>, + network: NetworkWorker::Hash>, + imported_blocks_stream: Pin> + Send>>, + finality_notification_stream: Pin> + Send>>, } -impl> Peer { +impl Peer { /// Get this peer ID. pub fn id(&self) -> PeerId { self.network.service().local_peer_id() @@ -240,7 +236,7 @@ impl> Peer { where F: FnMut(BlockBuilder) -> Block { let best_hash = self.client.info().best_hash; - self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block) + self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false) } /// Add blocks to the peer -- edit the block before adding. 
The chain will @@ -250,7 +246,8 @@ impl> Peer { at: BlockId, count: usize, origin: BlockOrigin, - mut edit_block: F + mut edit_block: F, + headers_only: bool, ) -> H256 where F: FnMut(BlockBuilder) -> Block { let full_client = self.client.as_full() .expect("blocks could only be generated by full clients"); @@ -275,7 +272,7 @@ impl> Peer { origin, header.clone(), None, - Some(block.extrinsics) + if headers_only { None } else { Some(block.extrinsics) }, ).unwrap(); let cache = if let Some(cache) = cache { cache.into_iter().collect() @@ -283,7 +280,7 @@ impl> Peer { Default::default() }; self.block_import.import_block(import_block, cache).expect("block_import failed"); - self.network.on_block_imported(hash, header, Vec::new(), true); + self.network.on_block_imported(header, Vec::new(), true); at = hash; } @@ -297,28 +294,46 @@ impl> Peer { self.push_blocks_at(BlockId::Hash(best_hash), count, with_tx) } + /// Push blocks to the peer (simplified: with or without a TX) + pub fn push_headers(&mut self, count: usize) -> H256 { + let best_hash = self.client.info().best_hash; + self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true) + } + /// Push blocks to the peer (simplified: with or without a TX) starting from /// given hash. pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> H256 { + self.generate_tx_blocks_at(at, count, with_tx, false) + } + + /// Push blocks/headers to the peer (simplified: with or without a TX) starting from + /// given hash. + fn generate_tx_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool, headers_only:bool) -> H256 { let mut nonce = 0; if with_tx { - self.generate_blocks_at(at, count, BlockOrigin::File, |mut builder| { - let transfer = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Alice.into(), - amount: 1, - nonce, - }; - builder.push(transfer.into_signed_tx()).unwrap(); - nonce = nonce + 1; - builder.build().unwrap().block - }) + self.generate_blocks_at( + at, + count, + BlockOrigin::File, |mut builder| { + let transfer = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Alice.into(), + amount: 1, + nonce, + }; + builder.push(transfer.into_signed_tx()).unwrap(); + nonce = nonce + 1; + builder.build().unwrap().block + }, + headers_only + ) } else { self.generate_blocks_at( at, count, BlockOrigin::File, |builder| builder.build().unwrap().block, + headers_only, ) } } @@ -336,7 +351,7 @@ impl> Peer { } /// Get a reference to the network service. - pub fn network_service(&self) -> &Arc::Hash>> { + pub fn network_service(&self) -> &Arc::Hash>> { &self.network.service() } @@ -392,16 +407,6 @@ impl TransactionPool for EmptyTransactionPool { fn transaction(&self, _h: &Hash) -> Option { None } } -pub trait SpecializationFactory { - fn create() -> Self; -} - -impl SpecializationFactory for DummySpecialization { - fn create() -> DummySpecialization { - DummySpecialization - } -} - /// Implements `BlockImport` for any `Transaction`. Internally the transaction is /// "converted", aka the field is set to `None`. /// @@ -522,7 +527,6 @@ impl VerifierAdapter { } pub trait TestNetFactory: Sized { - type Specialization: NetworkSpecialization + SpecializationFactory; type Verifier: 'static + Verifier; type PeerData: Default; @@ -536,9 +540,9 @@ pub trait TestNetFactory: Sized { ) -> Self::Verifier; /// Get reference to peer. 
- fn peer(&mut self, i: usize) -> &mut Peer; - fn peers(&self) -> &Vec>; - fn mut_peers>)>( + fn peer(&mut self, i: usize) -> &mut Peer; + fn peers(&self) -> &Vec>; + fn mut_peers>)>( &mut self, closure: F, ); @@ -636,8 +640,8 @@ pub trait TestNetFactory: Sized { transaction_pool: Arc::new(EmptyTransactionPool), protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), import_queue, - specialization: self::SpecializationFactory::create(), - block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())) + block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), + metrics_registry: None, }).unwrap(); self.mut_peers(|peers| { @@ -645,10 +649,8 @@ pub trait TestNetFactory: Sized { peer.network.add_known_address(network.service().local_peer_id(), listen_addr.clone()); } - let imported_blocks_stream = Box::new(client.import_notification_stream() - .map(|v| Ok::<_, ()>(v)).compat().fuse()); - let finality_notification_stream = Box::new(client.finality_notification_stream() - .map(|v| Ok::<_, ()>(v)).compat().fuse()); + let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); + let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); peers.push(Peer { data, @@ -712,8 +714,8 @@ pub trait TestNetFactory: Sized { transaction_pool: Arc::new(EmptyTransactionPool), protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), import_queue, - specialization: self::SpecializationFactory::create(), - block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())) + block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), + metrics_registry: None, }).unwrap(); self.mut_peers(|peers| { @@ -721,10 +723,8 @@ pub trait TestNetFactory: Sized { peer.network.add_known_address(network.service().local_peer_id(), listen_addr.clone()); } - let imported_blocks_stream = Box::new(client.import_notification_stream() - .map(|v| Ok::<_, ()>(v)).compat().fuse()); - let finality_notification_stream = Box::new(client.finality_notification_stream() - .map(|v| Ok::<_, ()>(v)).compat().fuse()); + let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); + let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); peers.push(Peer { data, @@ -743,48 +743,71 @@ pub trait TestNetFactory: Sized { /// Polls the testnet until all nodes are in sync. /// /// Must be executed in a task context. - fn poll_until_sync(&mut self) -> Async<()> { - self.poll(); + fn poll_until_sync(&mut self, cx: &mut FutureContext) -> Poll<()> { + self.poll(cx); // Return `NotReady` if there's a mismatch in the highest block number. let mut highest = None; for peer in self.peers().iter() { if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Async::NotReady + return Poll::Pending } if peer.network.num_sync_requests() != 0 { - return Async::NotReady + return Poll::Pending } match (highest, peer.client.info().best_hash) { (None, b) => highest = Some(b), (Some(ref a), ref b) if a == b => {}, - (Some(_), _) => return Async::NotReady, + (Some(_), _) => return Poll::Pending } } - Async::Ready(()) + Poll::Ready(()) + } + + /// Polls the testnet until theres' no activiy of any kind. + /// + /// Must be executed in a task context. 
+ fn poll_until_idle(&mut self, cx: &mut FutureContext) -> Poll<()> { + self.poll(cx); + + for peer in self.peers().iter() { + if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { + return Poll::Pending + } + if peer.network.num_sync_requests() != 0 { + return Poll::Pending + } + } + Poll::Ready(()) } /// Blocks the current thread until we are sync'ed. /// - /// Calls `poll_until_sync` repeatedly with the runtime passed as parameter. - fn block_until_sync(&mut self, runtime: &mut tokio::runtime::current_thread::Runtime) { - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| Ok(self.poll_until_sync()))).unwrap(); + /// Calls `poll_until_sync` repeatedly. + fn block_until_sync(&mut self) { + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx))); + } + + /// Blocks the current thread until there are no pending packets. + /// + /// Calls `poll_until_idle` repeatedly with the runtime passed as parameter. + fn block_until_idle(&mut self) { + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_idle(cx))); } /// Polls the testnet. Processes all the pending actions and returns `NotReady`. - fn poll(&mut self) { + fn poll(&mut self, cx: &mut FutureContext) { self.mut_peers(|peers| { for peer in peers { trace!(target: "sync", "-- Polling {}", peer.id()); - futures03::future::poll_fn(|cx| Pin::new(&mut peer.network).poll(cx)) - .map(|item| Ok::<_, ()>(item)) - .compat().poll().unwrap(); + if let Poll::Ready(res) = Pin::new(&mut peer.network).poll(cx) { + res.unwrap(); + } trace!(target: "sync", "-- Polling complete {}", peer.id()); // We poll `imported_blocks_stream`. - while let Ok(Async::Ready(Some(notification))) = peer.imported_blocks_stream.poll() { + while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { peer.network.on_block_imported( - notification.hash, notification.header, Vec::new(), true, @@ -793,7 +816,7 @@ pub trait TestNetFactory: Sized { // We poll `finality_notification_stream`, but we only take the last event. 
let mut last = None; - while let Ok(Async::Ready(Some(item))) = peer.finality_notification_stream.poll() { + while let Poll::Ready(Some(item)) = peer.finality_notification_stream.as_mut().poll_next(cx) { last = Some(item); } if let Some(notification) = last { @@ -805,11 +828,10 @@ pub trait TestNetFactory: Sized { } pub struct TestNet { - peers: Vec>, + peers: Vec>, } impl TestNetFactory for TestNet { - type Specialization = DummySpecialization; type Verifier = PassThroughVerifier; type PeerData = (); @@ -826,15 +848,15 @@ impl TestNetFactory for TestNet { PassThroughVerifier(false) } - fn peer(&mut self, i: usize) -> &mut Peer<(), Self::Specialization> { + fn peer(&mut self, i: usize) -> &mut Peer<()> { &mut self.peers[i] } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec> { &self.peers } - fn mut_peers>)>(&mut self, closure: F) { + fn mut_peers>)>(&mut self, closure: F) { closure(&mut self.peers); } } @@ -858,7 +880,6 @@ impl JustificationImport for ForceFinalized { pub struct JustificationTestNet(TestNet); impl TestNetFactory for JustificationTestNet { - type Specialization = DummySpecialization; type Verifier = PassThroughVerifier; type PeerData = (); @@ -870,17 +891,16 @@ impl TestNetFactory for JustificationTestNet { self.0.make_verifier(client, config, peer_data) } - fn peer(&mut self, i: usize) -> &mut Peer { + fn peer(&mut self, i: usize) -> &mut Peer { self.0.peer(i) } - fn peers(&self) -> &Vec> { + fn peers(&self) -> &Vec> { self.0.peers() } fn mut_peers>, + &mut Vec>, )>(&mut self, closure: F) { self.0.mut_peers(closure) } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 210a4fb38bb68949edfd44925e8feb0aca13b898..388257516832002de97775bbeb21c6696eaae38c 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -16,14 +16,12 @@ use sc_network::config::Roles; use sp_consensus::BlockOrigin; -use futures03::TryFutureExt as _; use std::time::Duration; -use tokio::runtime::current_thread; +use futures::executor::block_on; use super::*; fn test_ancestor_search_when_common_is(n: usize) { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); net.peer(0).push_blocks(n, false); @@ -34,7 +32,7 @@ fn test_ancestor_search_when_common_is(n: usize) { net.peer(1).push_blocks(100, false); net.peer(2).push_blocks(100, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let peer1 = &net.peers()[1]; assert!(net.peers()[0].blockchain_canon_equals(peer1)); } @@ -42,24 +40,22 @@ fn test_ancestor_search_when_common_is(n: usize) { #[test] fn sync_peers_works() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); for peer in 0..3 { if net.peer(peer).num_peers() != 2 { - return Ok(Async::NotReady) + return Poll::Pending } } - Ok(Async::Ready(())) - })).unwrap(); + Poll::Ready(()) + })); } #[test] fn sync_cycle_from_offline_to_syncing_to_offline() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); for peer in 0..3 { // Offline, and not major syncing. @@ -71,51 +67,50 @@ fn sync_cycle_from_offline_to_syncing_to_offline() { net.peer(2).push_blocks(100, false); // Block until all nodes are online and nodes 0 and 1 and major syncing. 
- runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); for peer in 0..3 { // Online if net.peer(peer).is_offline() { - return Ok(Async::NotReady) + return Poll::Pending } if peer < 2 { // Major syncing. if net.peer(peer).blocks_count() < 100 && !net.peer(peer).is_major_syncing() { - return Ok(Async::NotReady) + return Poll::Pending } } } - Ok(Async::Ready(())) - })).unwrap(); + Poll::Ready(()) + })); // Block until all nodes are done syncing. - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); for peer in 0..3 { if net.peer(peer).is_major_syncing() { - return Ok(Async::NotReady) + return Poll::Pending } } - Ok(Async::Ready(())) - })).unwrap(); + Poll::Ready(()) + })); // Now drop nodes 1 and 2, and check that node 0 is offline. net.peers.remove(2); net.peers.remove(1); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if !net.peer(0).is_offline() { - Ok(Async::NotReady) + Poll::Pending } else { - Ok(Async::Ready(())) + Poll::Ready(()) } - })).unwrap(); + })); } #[test] fn syncing_node_not_major_syncing_when_disconnected() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); // Generate blocks. @@ -125,36 +120,35 @@ fn syncing_node_not_major_syncing_when_disconnected() { assert!(!net.peer(1).is_major_syncing()); // Check that we switch to major syncing. - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if !net.peer(1).is_major_syncing() { - Ok(Async::NotReady) + Poll::Pending } else { - Ok(Async::Ready(())) + Poll::Ready(()) } - })).unwrap(); + })); // Destroy two nodes, and check that we switch to non-major syncing. 
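The tests in this file all follow the same conversion: the old tokio `current_thread::Runtime` plus futures-0.1 `Async` pattern becomes `futures::executor::block_on` over a `poll_fn` that drives the network with a `std::task::Context` and returns `Poll`. A minimal sketch of that idiom, assuming the `TestNet` harness changed above (the helper name `wait_for_peers` is illustrative, not part of this change):

    use futures::executor::block_on;
    use std::task::Poll;

    /// Poll the test network until `peer` reports at least `min_peers` connections.
    /// `TestNet`, `poll` and `num_peers` come from the test harness in this diff.
    fn wait_for_peers(net: &mut TestNet, peer: usize, min_peers: usize) {
        block_on(futures::future::poll_fn::<(), _>(|cx| {
            // Drive every peer's network worker and notification streams once.
            net.poll(cx);
            if net.peer(peer).num_peers() >= min_peers {
                Poll::Ready(())
            } else {
                Poll::Pending
            }
        }));
    }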
net.peers.remove(2); net.peers.remove(0); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if net.peer(0).is_major_syncing() { - Ok(Async::NotReady) + Poll::Pending } else { - Ok(Async::Ready(())) + Poll::Ready(()) } - })).unwrap(); + })); } #[test] fn sync_from_two_peers_works() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); net.peer(1).push_blocks(100, false); net.peer(2).push_blocks(100, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let peer1 = &net.peers()[1]; assert!(net.peers()[0].blockchain_canon_equals(peer1)); assert!(!net.peer(0).is_major_syncing()); @@ -163,12 +157,11 @@ fn sync_from_two_peers_works() { #[test] fn sync_from_two_peers_with_ancestry_search_works() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); net.peer(0).push_blocks(10, true); net.peer(1).push_blocks(100, false); net.peer(2).push_blocks(100, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let peer1 = &net.peers()[1]; assert!(net.peers()[0].blockchain_canon_equals(peer1)); } @@ -176,14 +169,13 @@ fn sync_from_two_peers_with_ancestry_search_works() { #[test] fn ancestry_search_works_when_backoff_is_one() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); net.peer(0).push_blocks(1, false); net.peer(1).push_blocks(2, false); net.peer(2).push_blocks(2, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let peer1 = &net.peers()[1]; assert!(net.peers()[0].blockchain_canon_equals(peer1)); } @@ -191,14 +183,13 @@ fn ancestry_search_works_when_backoff_is_one() { #[test] fn ancestry_search_works_when_ancestor_is_genesis() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); net.peer(0).push_blocks(13, true); net.peer(1).push_blocks(100, false); net.peer(2).push_blocks(100, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let peer1 = &net.peers()[1]; assert!(net.peers()[0].blockchain_canon_equals(peer1)); } @@ -221,10 +212,9 @@ fn ancestry_search_works_when_common_is_hundred() { #[test] fn sync_long_chain_works() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(2); net.peer(1).push_blocks(500, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); let peer1 = &net.peers()[1]; assert!(net.peers()[0].blockchain_canon_equals(peer1)); } @@ -232,18 +222,17 @@ fn sync_long_chain_works() { #[test] fn sync_no_common_longer_chain_fails() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); net.peer(0).push_blocks(20, true); net.peer(1).push_blocks(20, false); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if net.peer(0).is_major_syncing() { - Ok(Async::NotReady) + Poll::Pending } else { - Ok(Async::Ready(())) + Poll::Ready(()) } - })).unwrap(); + })); let peer1 = &net.peers()[1]; assert!(!net.peers()[0].blockchain_canon_equals(peer1)); } @@ -251,10 +240,9 @@ fn sync_no_common_longer_chain_fails() { #[test] fn sync_justifications() { let _ = 
::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = JustificationTestNet::new(3); net.peer(0).push_blocks(20, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); // there's currently no justification for block #10 assert_eq!(net.peer(0).client().justification(&BlockId::Number(10)).unwrap(), None); @@ -274,26 +262,25 @@ fn sync_justifications() { net.peer(1).request_justification(&h2.hash().into(), 15); net.peer(1).request_justification(&h3.hash().into(), 20); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); for height in (10..21).step_by(5) { if net.peer(0).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { - return Ok(Async::NotReady); + return Poll::Pending; } if net.peer(1).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { - return Ok(Async::NotReady); + return Poll::Pending; } } - Ok(Async::Ready(())) - })).unwrap(); + Poll::Ready(()) + })); } #[test] fn sync_justifications_across_forks() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = JustificationTestNet::new(3); // we push 5 blocks net.peer(0).push_blocks(5, false); @@ -303,30 +290,29 @@ fn sync_justifications_across_forks() { // peer 1 will only see the longer fork. but we'll request justifications // for both and finalize the small fork instead. - net.block_until_sync(&mut runtime); + net.block_until_sync(); net.peer(0).client().finalize_block(BlockId::Hash(f1_best), Some(Vec::new()), true).unwrap(); net.peer(1).request_justification(&f1_best, 10); net.peer(1).request_justification(&f2_best, 11); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if net.peer(0).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) && net.peer(1).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) { - Ok(Async::Ready(())) + Poll::Ready(()) } else { - Ok(Async::NotReady) + Poll::Pending } - })).unwrap(); + })); } #[test] fn sync_after_fork_works() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); net.peer(0).push_blocks(30, false); net.peer(1).push_blocks(30, false); @@ -340,7 +326,7 @@ fn sync_after_fork_works() { net.peer(2).push_blocks(1, false); // peer 1 has the best chain - net.block_until_sync(&mut runtime); + net.block_until_sync(); let peer1 = &net.peers()[1]; assert!(net.peers()[0].blockchain_canon_equals(peer1)); (net.peers()[1].blockchain_canon_equals(peer1)); @@ -350,7 +336,6 @@ fn sync_after_fork_works() { #[test] fn syncs_all_forks() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(4); net.peer(0).push_blocks(2, false); net.peer(1).push_blocks(2, false); @@ -358,7 +343,7 @@ fn syncs_all_forks() { net.peer(0).push_blocks(2, true); net.peer(1).push_blocks(4, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); // Check that all peers have all of the blocks. 
assert_eq!(9, net.peer(0).blocks_count()); assert_eq!(9, net.peer(1).blocks_count()); @@ -367,12 +352,11 @@ fn syncs_all_forks() { #[test] fn own_blocks_are_announced() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); - net.block_until_sync(&mut runtime); // connect'em + net.block_until_sync(); // connect'em net.peer(0).generate_blocks(1, BlockOrigin::Own, |builder| builder.build().unwrap().block); - net.block_until_sync(&mut runtime); + net.block_until_sync(); assert_eq!(net.peer(0).client.info().best_number, 1); assert_eq!(net.peer(1).client.info().best_number, 1); @@ -384,7 +368,6 @@ fn own_blocks_are_announced() { #[test] fn blocks_are_not_announced_by_light_nodes() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(0); // full peer0 is connected to light peer @@ -397,7 +380,7 @@ fn blocks_are_not_announced_by_light_nodes() { // Sync between 0 and 1. net.peer(0).push_blocks(1, false); assert_eq!(net.peer(0).client.info().best_number, 1); - net.block_until_sync(&mut runtime); + net.block_until_sync(); assert_eq!(net.peer(1).client.info().best_number, 1); // Add another node and remove node 0. @@ -405,18 +388,17 @@ fn blocks_are_not_announced_by_light_nodes() { net.peers.remove(0); // Poll for a few seconds and make sure 1 and 2 (now 0 and 1) don't sync together. - let mut delay = futures_timer::Delay::new(Duration::from_secs(5)).unit_error().compat(); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| { - net.poll(); - delay.poll().map_err(|_| ()) - })).unwrap(); + let mut delay = futures_timer::Delay::new(Duration::from_secs(5)); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + Pin::new(&mut delay).poll(cx) + })); assert_eq!(net.peer(1).client.info().best_number, 0); } #[test] fn can_sync_small_non_best_forks() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(2); net.peer(0).push_blocks(30, false); net.peer(1).push_blocks(30, false); @@ -435,14 +417,14 @@ fn can_sync_small_non_best_forks() { assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none()); // poll until the two nodes connect, otherwise announcing the block will not work - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if net.peer(0).num_peers() == 0 { - Ok(Async::NotReady) + Poll::Pending } else { - Ok(Async::Ready(())) + Poll::Ready(()) } - })).unwrap(); + })); // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. @@ -455,32 +437,31 @@ fn can_sync_small_non_best_forks() { // after announcing, peer 1 downloads the block. 
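A second idiom used in these tests is the time-bounded negative check: rather than waiting for a condition, the test keeps polling the network until a `futures_timer::Delay` fires and then asserts that nothing was synced. Roughly (a sketch only; `poll_for` is an illustrative helper, while `Delay` and `TestNet::poll` are the ones used above):

    use std::{future::Future, pin::Pin, time::Duration};
    use futures::executor::block_on;

    /// Keep driving the test network for `duration`, then return.
    fn poll_for(net: &mut TestNet, duration: Duration) {
        let mut delay = futures_timer::Delay::new(duration);
        block_on(futures::future::poll_fn(|cx| {
            // The network is polled on every wake-up; completion is decided by the timer alone.
            net.poll(cx);
            Pin::new(&mut delay).poll(cx)
        }));
    }

After the call returns, the test asserts that the peer's best block is unchanged.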
- runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Ok(Async::NotReady) + return Poll::Pending } - Ok(Async::Ready(())) - })).unwrap(); - net.block_until_sync(&mut runtime); + Poll::Ready(()) + })); + net.block_until_sync(); let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true); net.peer(0).announce_block(another_fork, Vec::new()); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if net.peer(1).client().header(&BlockId::Hash(another_fork)).unwrap().is_none() { - return Ok(Async::NotReady) + return Poll::Pending } - Ok(Async::Ready(())) - })).unwrap(); + Poll::Ready(()) + })); } #[test] fn can_not_sync_from_light_peer() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); // given the network with 1 full nodes (#0) and 1 light node (#1) let mut net = TestNet::new(1); @@ -490,7 +471,7 @@ fn can_not_sync_from_light_peer() { net.peer(0).push_blocks(1, false); // and let the light client sync from this node - net.block_until_sync(&mut runtime); + net.block_until_sync(); // ensure #0 && #1 have the same best block let full0_info = net.peer(0).client.info(); @@ -504,29 +485,28 @@ fn can_not_sync_from_light_peer() { net.peers.remove(0); // ensure that the #2 (now #1) fails to sync block #1 even after 5 seconds - let mut test_finished = futures_timer::Delay::new(Duration::from_secs(5)).unit_error().compat(); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); - test_finished.poll().map_err(|_| ()) - })).unwrap(); + let mut test_finished = futures_timer::Delay::new(Duration::from_secs(5)); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + Pin::new(&mut test_finished).poll(cx) + })); } #[test] fn light_peer_imports_header_from_announce() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); - fn import_with_announce(net: &mut TestNet, runtime: &mut current_thread::Runtime, hash: H256) { + fn import_with_announce(net: &mut TestNet, hash: H256) { net.peer(0).announce_block(hash, Vec::new()); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() { - Ok(Async::Ready(())) + Poll::Ready(()) } else { - Ok(Async::NotReady) + Poll::Pending } - })).unwrap(); + })); } // given the network with 1 full nodes (#0) and 1 light node (#1) @@ -534,21 +514,20 @@ fn light_peer_imports_header_from_announce() { net.add_light_peer(&Default::default()); // let them connect to each other - net.block_until_sync(&mut runtime); + net.block_until_sync(); // check that NEW block is imported from announce message let new_hash = net.peer(0).push_blocks(1, false); - import_with_announce(&mut net, &mut runtime, new_hash); + import_with_announce(&mut net, new_hash); // check that KNOWN STALE block is imported from announce message let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); - import_with_announce(&mut net, &mut runtime, known_stale_hash); + import_with_announce(&mut net, 
known_stale_hash); } #[test] fn can_sync_explicit_forks() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(2); net.peer(0).push_blocks(30, false); net.peer(1).push_blocks(30, false); @@ -568,14 +547,14 @@ fn can_sync_explicit_forks() { assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none()); // poll until the two nodes connect, otherwise announcing the block will not work - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Ok(Async::NotReady) + Poll::Pending } else { - Ok(Async::Ready(())) + Poll::Ready(()) } - })).unwrap(); + })); // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. @@ -589,21 +568,20 @@ fn can_sync_explicit_forks() { net.peer(1).set_sync_fork_request(vec![first_peer_id], small_hash, small_number); // peer 1 downloads the block. - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Ok(Async::NotReady) + return Poll::Pending } - Ok(Async::Ready(())) - })).unwrap(); + Poll::Ready(()) + })); } #[test] fn syncs_header_only_forks() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(0); let config = ProtocolConfig::default(); net.add_full_peer_with_states(&config, None); @@ -616,7 +594,7 @@ fn syncs_header_only_forks() { let small_number = net.peer(0).client().info().best_number; net.peer(1).push_blocks(4, false); - net.block_until_sync(&mut runtime); + net.block_until_sync(); // Peer 1 will sync the small fork even though common block state is missing assert_eq!(9, net.peer(0).blocks_count()); assert_eq!(9, net.peer(1).blocks_count()); @@ -624,19 +602,18 @@ fn syncs_header_only_forks() { // Request explicit header-only sync request for the ancient fork. 
let first_peer_id = net.peer(0).id(); net.peer(1).set_sync_fork_request(vec![first_peer_id], small_hash, small_number); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { - net.poll(); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Ok(Async::NotReady) + return Poll::Pending } - Ok(Async::Ready(())) - })).unwrap(); + Poll::Ready(()) + })); } #[test] fn does_not_sync_announced_old_best_block() { let _ = ::env_logger::try_init(); - let mut runtime = current_thread::Runtime::new().unwrap(); let mut net = TestNet::new(3); let old_hash = net.peer(0).push_blocks(1, false); @@ -645,18 +622,38 @@ fn does_not_sync_announced_old_best_block() { net.peer(1).push_blocks(20, true); net.peer(0).announce_block(old_hash, Vec::new()); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { + block_on(futures::future::poll_fn::<(), _>(|cx| { // poll once to import announcement - net.poll(); - Ok(Async::Ready(())) - })).unwrap(); + net.poll(cx); + Poll::Ready(()) + })); assert!(!net.peer(1).is_major_syncing()); net.peer(0).announce_block(old_hash_with_parent, Vec::new()); - runtime.block_on(futures::future::poll_fn::<(), (), _>(|| -> Result<_, ()> { + block_on(futures::future::poll_fn::<(), _>(|cx| { // poll once to import announcement - net.poll(); - Ok(Async::Ready(())) - })).unwrap(); + net.poll(cx); + Poll::Ready(()) + })); assert!(!net.peer(1).is_major_syncing()); } + +#[test] +fn full_sync_requires_block_body() { + // Check that we don't sync headers-only in full mode. + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(2); + + net.peer(0).push_headers(1); + // Wait for nodes to connect + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + net.block_until_idle(); + assert_eq!(net.peer(1).client.info().best_number, 0); +} diff --git a/client/offchain/Cargo.toml b/client/offchain/Cargo.toml index 29e6dca2a7f95bbad8422186586ac4305b9453cd..9ea0372969b666ee22ad6944a93260e9395b2516 100644 --- a/client/offchain/Cargo.toml +++ b/client/offchain/Cargo.toml @@ -1,41 +1,43 @@ [package] description = "Substrate offchain workers" name = "sc-offchain" -version = "2.0.0" +version = "2.0.0-alpha.3" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] bytes = "0.5" -sc-client-api = { version = "2.0.0", path = "../api" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } fnv = "1.0.6" futures = "0.3.1" futures-timer = "3.0.1" log = "0.4.8" threadpool = "1.7" num_cpus = "1.10" -sp-offchain = { version = "2.0.0", path = "../../primitives/offchain" } -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +sp-offchain = { version = "2.0.0-alpha.2", path = "../../primitives/offchain" } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } rand = "0.7.2" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } 
-sc-network = { version = "0.8", path = "../network" } -sc-keystore = { version = "2.0.0", path = "../keystore" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } [target.'cfg(not(target_os = "unknown"))'.dependencies] hyper = "0.13.2" hyper-rustls = "0.19" [dev-dependencies] -sc-client-db = { version = "0.8", default-features = true, path = "../db/" } +sc-client-db = { version = "0.8.0-alpha.2", default-features = true, path = "../db/" } env_logger = "0.7.0" -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } tokio = "0.2" -sc-transaction-pool = { version = "2.0.0", path = "../../client/transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } +sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../client/transaction-pool" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } [features] default = [] diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 7fc9671fcbcd826c85b69c9414fbaa34e56797da..27a7f508459ba1bbc3cc4ba0eef0ed2a7c5f9770 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -169,6 +169,7 @@ mod tests { use substrate_test_runtime_client::runtime::Block; use sc_transaction_pool::{BasicPool, FullChainApi}; use sp_transaction_pool::{TransactionPool, InPoolTransaction}; + use sc_client_api::ExecutorProvider; struct MockNetworkStateInfo(); diff --git a/client/peerset/Cargo.toml b/client/peerset/Cargo.toml index c17ffdc385ba93a1475181272a8669c99c74edf2..9e76b8015afcadf897faf017a5ba935270cd5eef 100644 --- a/client/peerset/Cargo.toml +++ b/client/peerset/Cargo.toml @@ -3,13 +3,16 @@ description = "Connectivity manager based on reputation" homepage = "http://parity.io" license = "GPL-3.0" name = "sc-peerset" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-peerset" + [dependencies] futures = "0.3.1" -libp2p = { version = "0.16.0", default-features = false } +libp2p = { version = "0.16.2", default-features = false } log = "0.4.8" serde_json = "1.0.41" wasm-timer = "0.2" diff --git a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index fb91f1fcf69e09d4c7fec2949e2ee586184b588d..87ed2336aea9c65da74aafbc53216ce0a6ffaff7 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -21,18 +21,22 @@ mod peersstate; use std::{collections::{HashSet, HashMap}, collections::VecDeque}; use futures::{prelude::*, channel::mpsc}; -use libp2p::PeerId; use log::{debug, error, trace}; use serde_json::json; -use std::{pin::Pin, task::Context, task::Poll}; +use std::{pin::Pin, task::{Context, Poll}, time::Duration}; use wasm_timer::Instant; +pub use libp2p::PeerId; + /// We don't accept nodes whose reputation is under this value. const BANNED_THRESHOLD: i32 = 82 * (i32::min_value() / 100); /// Reputation change for a node when we get disconnected from it. const DISCONNECT_REPUTATION_CHANGE: i32 = -256; /// Reserved peers group ID const RESERVED_NODES: &'static str = "reserved"; +/// Amount of time between the moment we disconnect from a node and the moment we remove it from +/// the list. 
+const FORGET_AFTER: Duration = Duration::from_secs(3600); #[derive(Debug)] enum Action { @@ -45,7 +49,7 @@ enum Action { RemoveFromPriorityGroup(String, PeerId), } -/// Shared handle to the peer set manager (PSM). Distributed around the code. +/// Description of a reputation adjustment for a node. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct ReputationChange { /// Reputation delta. @@ -309,10 +313,11 @@ impl Peerset { /// Updates the value of `self.latest_time_update` and performs all the updates that happen /// over time, such as reputation increases for staying connected. fn update_time(&mut self) { + let now = Instant::now(); + // We basically do `(now - self.latest_update).as_secs()`, except that by the way we do it // we know that we're not going to miss seconds because of rounding to integers. let secs_diff = { - let now = Instant::now(); let elapsed_latest = self.latest_time_update - self.created; let elapsed_now = now - self.created; self.latest_time_update = now; @@ -344,10 +349,16 @@ impl Peerset { peer.set_reputation(after) } peersstate::Peer::NotConnected(mut peer) => { - let before = peer.reputation(); - let after = reput_tick(before); - trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); - peer.set_reputation(after) + if peer.reputation() == 0 && + peer.last_connected_or_discovered() + FORGET_AFTER < now + { + peer.forget_peer(); + } else { + let before = peer.reputation(); + let after = reput_tick(before); + trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); + peer.set_reputation(after) + } } peersstate::Peer::Unknown(_) => unreachable!("We iterate over known peers; qed") }; @@ -413,7 +424,10 @@ impl Peerset { let not_connected = match self.data.peer(&peer_id) { // If we're already connected, don't answer, as the docs mention. peersstate::Peer::Connected(_) => return, - peersstate::Peer::NotConnected(entry) => entry, + peersstate::Peer::NotConnected(mut entry) => { + entry.bump_last_connected_or_discovered(); + entry + }, peersstate::Peer::Unknown(entry) => entry.discover(), }; @@ -504,6 +518,11 @@ impl Peerset { }) } + /// Returns the number of peers that we have discovered. + pub fn num_discovered_peers(&self) -> usize { + self.data.peers().len() + } + /// Returns priority group by id. pub fn get_priority_group(&self, group_id: &str) -> Option> { self.data.get_priority_group(group_id) diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index 96a6698734b744436cc846fcc772e2edb53b221a..843ec0a36006f436e3d9b2b9c83175ec632566e5 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -17,8 +17,9 @@ //! Contains the state storage behind the peerset. use libp2p::PeerId; +use log::{error, warn}; use std::{borrow::Cow, collections::{HashSet, HashMap}}; -use log::warn; +use wasm_timer::Instant; /// State storage behind the peerset. /// @@ -69,7 +70,9 @@ struct Node { impl Default for Node { fn default() -> Node { Node { - connection_state: ConnectionState::NotConnected, + connection_state: ConnectionState::NotConnected { + last_connected: Instant::now(), + }, reputation: 0, } } @@ -83,7 +86,11 @@ enum ConnectionState { /// We are connected through an outgoing connection. Out, /// We are not connected to this node. - NotConnected, + NotConnected { + /// When we were last connected to the node, or if we were never connected when we + /// discovered it. 
+ last_connected: Instant, + }, } impl ConnectionState { @@ -92,7 +99,7 @@ impl ConnectionState { match self { ConnectionState::In => true, ConnectionState::Out => true, - ConnectionState::NotConnected => false, + ConnectionState::NotConnected { .. } => false, } } } @@ -137,7 +144,7 @@ impl PeersState { /// Returns the list of all the peers we know of. // Note: this method could theoretically return a `Peer`, but implementing that // isn't simple. - pub fn peers(&self) -> impl Iterator { + pub fn peers(&self) -> impl ExactSizeIterator { self.nodes.keys() } @@ -212,11 +219,13 @@ impl PeersState { match node.connection_state { ConnectionState::In => self.num_in -= 1, ConnectionState::Out => self.num_out -= 1, - ConnectionState::NotConnected => + ConnectionState::NotConnected { .. } => debug_assert!(false, "State inconsistency: disconnecting a disconnected node") } } - node.connection_state = ConnectionState::NotConnected; + node.connection_state = ConnectionState::NotConnected { + last_connected: Instant::now(), + }; } else { warn!(target: "peerset", "Attempting to disconnect unknown peer {}", peer_id); } @@ -292,7 +301,7 @@ impl PeersState { match peer.connection_state { ConnectionState::In => self.num_in += 1, ConnectionState::Out => self.num_out += 1, - ConnectionState::NotConnected => {}, + ConnectionState::NotConnected { .. } => {}, } } } @@ -305,7 +314,7 @@ impl PeersState { match peer.connection_state { ConnectionState::In => self.num_in -= 1, ConnectionState::Out => self.num_out -= 1, - ConnectionState::NotConnected => {}, + ConnectionState::NotConnected { .. } => {}, } } } @@ -467,6 +476,45 @@ impl<'a> NotConnectedPeer<'a> { self.peer_id.into_owned() } + /// Bumps the value that `last_connected_or_discovered` would return to now, even if we + /// didn't connect or disconnect. + pub fn bump_last_connected_or_discovered(&mut self) { + let state = match self.state.nodes.get_mut(&*self.peer_id) { + Some(s) => s, + None => return, + }; + + if let ConnectionState::NotConnected { last_connected } = &mut state.connection_state { + *last_connected = Instant::now(); + } + } + + /// Returns when we were last connected to this peer, or when we discovered it if we were + /// never connected. + /// + /// Guaranteed to be earlier than calling `Instant::now()` after the function returns. + pub fn last_connected_or_discovered(&self) -> Instant { + let state = match self.state.nodes.get(&*self.peer_id) { + Some(s) => s, + None => { + error!( + target: "peerset", + "State inconsistency with {}; not connected after borrow", + self.peer_id + ); + return Instant::now(); + } + }; + + match state.connection_state { + ConnectionState::NotConnected { last_connected } => last_connected, + _ => { + error!(target: "peerset", "State inconsistency with {}", self.peer_id); + Instant::now() + } + } + } + /// Tries to set the peer as connected as an outgoing connection. /// /// If there are enough slots available, switches the node to "connected" and returns `Ok`. If @@ -518,6 +566,22 @@ impl<'a> NotConnectedPeer<'a> { pub fn add_reputation(&mut self, modifier: i32) { self.state.add_reputation(&self.peer_id, modifier) } + + /// Un-discovers the peer. Removes it from the list. + pub fn forget_peer(self) -> UnknownPeer<'a> { + if self.state.nodes.remove(&*self.peer_id).is_none() { + error!( + target: "peerset", + "State inconsistency with {} when forgetting peer", + self.peer_id + ); + } + + UnknownPeer { + parent: self.state, + peer_id: self.peer_id, + } + } } /// A peer that we have never heard of. 
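With the `last_connected` timestamp added to `ConnectionState::NotConnected`, the peer set can now drop peers it has not seen for a long time: `update_time` forgets a disconnected peer once its reputation has decayed back to zero and `last_connected_or_discovered` is older than `FORGET_AFTER`. The rule reduces to a small predicate; a self-contained sketch (the real code uses `wasm_timer::Instant`, and the helper name is illustrative):

    use std::time::{Duration, Instant};

    /// Mirrors the `FORGET_AFTER` constant introduced in this change (one hour).
    const FORGET_AFTER: Duration = Duration::from_secs(3600);

    /// A disconnected peer is removed from the list once its reputation has decayed to
    /// zero and it has neither connected nor been discovered for `FORGET_AFTER`.
    fn should_forget(reputation: i32, last_connected_or_discovered: Instant, now: Instant) -> bool {
        reputation == 0 && last_connected_or_discovered + FORGET_AFTER < now
    }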
@@ -533,7 +597,9 @@ impl<'a> UnknownPeer<'a> { /// values using the `NotConnectedPeer` that this method returns. pub fn discover(self) -> NotConnectedPeer<'a> { self.parent.nodes.insert(self.peer_id.clone().into_owned(), Node { - connection_state: ConnectionState::NotConnected, + connection_state: ConnectionState::NotConnected { + last_connected: Instant::now(), + }, reputation: 0, }); diff --git a/client/rpc-api/Cargo.toml b/client/rpc-api/Cargo.toml index b9f4cbb0159ce5e059499c38829860aa7cef5599..0f3b72e519ba5cbff72236a885deb54101347594 100644 --- a/client/rpc-api/Cargo.toml +++ b/client/rpc-api/Cargo.toml @@ -1,12 +1,15 @@ [package] name = "sc-rpc-api" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate RPC interfaces." [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0" } +codec = { package = "parity-scale-codec", version = "1.2.0" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } jsonrpc-core = "14.0.3" @@ -15,10 +18,10 @@ jsonrpc-derive = "14.0.3" jsonrpc-pubsub = "14.0.3" log = "0.4.8" parking_lot = "0.10.0" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } -sp-runtime = { path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-version = { version = "2.0.0-alpha.2", path = "../../primitives/version" } +sp-runtime = { path = "../../primitives/runtime" , version = "2.0.0-alpha.2"} serde = { version = "1.0.101", features = ["derive"] } serde_json = "1.0.41" -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sp-rpc = { version = "2.0.0", path = "../../primitives/rpc" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } +sp-rpc = { version = "2.0.0-alpha.2", path = "../../primitives/rpc" } diff --git a/client/rpc-api/src/chain/mod.rs b/client/rpc-api/src/chain/mod.rs index 0c270a3f70528e4b772a639cca98f395a85b8e2b..2ab3851d37663a328e37b4f1d8998d103905d9f3 100644 --- a/client/rpc-api/src/chain/mod.rs +++ b/client/rpc-api/src/chain/mod.rs @@ -54,6 +54,18 @@ pub trait ChainApi { #[rpc(name = "chain_getFinalizedHead", alias("chain_getFinalisedHead"))] fn finalized_head(&self) -> Result; + /// All head subscription + #[pubsub(subscription = "chain_allHead", subscribe, name = "chain_subscribeAllHeads")] + fn subscribe_all_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
<Header>
); + + /// Unsubscribe from all head subscription. + #[pubsub(subscription = "chain_allHead", unsubscribe, name = "chain_unsubscribeAllHeads")] + fn unsubscribe_all_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; + /// New head subscription #[pubsub( subscription = "chain_newHead", @@ -76,7 +88,7 @@ pub trait ChainApi { id: SubscriptionId, ) -> RpcResult; - /// New head subscription + /// Finalized head subscription #[pubsub( subscription = "chain_finalizedHead", subscribe, @@ -85,7 +97,7 @@ pub trait ChainApi { )] fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); - /// Unsubscribe from new head subscription. + /// Unsubscribe from finalized head subscription. #[pubsub( subscription = "chain_finalizedHead", unsubscribe, diff --git a/client/rpc-servers/Cargo.toml b/client/rpc-servers/Cargo.toml index 6b0d3f4adacf8bc3d5d07b28082455916080df07..79d2984c2293e0649456ce5453f0dd719cfe77ad 100644 --- a/client/rpc-servers/Cargo.toml +++ b/client/rpc-servers/Cargo.toml @@ -1,9 +1,12 @@ [package] name = "sc-rpc-server" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate RPC servers." [dependencies] jsonrpc-core = "14.0.3" @@ -11,7 +14,7 @@ pubsub = { package = "jsonrpc-pubsub", version = "14.0.3" } log = "0.4.8" serde = "1.0.101" serde_json = "1.0.41" -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } [target.'cfg(not(target_os = "unknown"))'.dependencies] http = { package = "jsonrpc-http-server", version = "14.0.3" } diff --git a/client/rpc/Cargo.toml b/client/rpc/Cargo.toml index d09617ebc1658405e87fa127827987d6ddee9d94..9a8be85becf59f61fc914ab6c707dd976714a0d2 100644 --- a/client/rpc/Cargo.toml +++ b/client/rpc/Cargo.toml @@ -1,41 +1,44 @@ [package] name = "sc-rpc" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate Client RPC" [dependencies] -sc-rpc-api = { version = "0.8", path = "../rpc-api" } -sc-client-api = { version = "2.0.0", path = "../api" } -sc-client = { version = "0.8", path = "../" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.0.0" } +sc-rpc-api = { version = "0.8.0-alpha.2", path = "../rpc-api" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sc-client = { version = "0.8.0-alpha.2", path = "../" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +codec = { package = "parity-scale-codec", version = "1.2.0" } futures = { version = "0.3.1", features = ["compat"] } jsonrpc-pubsub = "14.0.3" log = "0.4.8" -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } rpc = { package = "jsonrpc-core", version = "14.0.3" } -sp-version = { version = "2.0.0", path = "../../primitives/version" } +sp-version = { version = "2.0.0-alpha.2", path = "../../primitives/version" } serde_json = "1.0.41" -sp-session = { version = "2.0.0", path = "../../primitives/session" } -sp-offchain = { version = "2.0.0", path = "../../primitives/offchain" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-rpc = { version = "2.0.0", path = "../../primitives/rpc" } -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } -sc-executor = { version = "0.8", path = "../executor" } -sc-keystore = { version = "2.0.0", path = "../keystore" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-session = { version = "2.0.0-alpha.2", path = "../../primitives/session" } +sp-offchain = { version = "2.0.0-alpha.2", path = "../../primitives/offchain" } +sp-runtime = { version 
= "2.0.0-alpha.2", path = "../../primitives/runtime" } +sp-rpc = { version = "2.0.0-alpha.2", path = "../../primitives/rpc" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } +sc-executor = { version = "0.8.0-alpha.2", path = "../executor" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } hash-db = { version = "0.15.2", default-features = false } parking_lot = "0.10.0" [dev-dependencies] assert_matches = "1.3.0" futures01 = { package = "futures", version = "0.1.29" } -sc-network = { version = "0.8", path = "../network" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } rustc-hex = "2.0.1" -sp-io = { version = "2.0.0", path = "../../primitives/io" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } tokio = "0.1.22" -sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } +sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../transaction-pool" } diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index 06bdcf883c6891548717cd4d44824e0816ba0fd9..80a3a4349ed82f5fa2ebe04d96cf24d28e18ae5e 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -22,8 +22,7 @@ mod tests; use std::{sync::Arc, convert::TryInto}; use log::warn; -use sc_client::Client; -use sp_blockchain::Error as ClientError; +use sp_blockchain::{Error as ClientError, HeaderBackend}; use rpc::futures::{ Sink, Future, @@ -36,7 +35,7 @@ use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use codec::{Encode, Decode}; use sp_core::{Bytes, traits::BareCryptoStorePtr}; use sp_api::ProvideRuntimeApi; -use sp_runtime::{generic, traits}; +use sp_runtime::generic; use sp_transaction_pool::{ TransactionPool, InPoolTransaction, TransactionStatus, BlockHash, TxHash, TransactionFor, error::IntoPoolError, @@ -48,9 +47,9 @@ pub use sc_rpc_api::author::*; use self::error::{Error, FutureResult, Result}; /// Authoring API -pub struct Author { +pub struct Author { /// Substrate client - client: Arc>, + client: Arc, /// Transactions pool pool: Arc
<P>
, /// Subscriptions manager @@ -59,10 +58,10 @@ pub struct Author { keystore: BareCryptoStorePtr, } -impl Author { +impl Author { /// Create new instance of Authoring API. pub fn new( - client: Arc>, + client: Arc, pool: Arc
<P>
, subscriptions: Subscriptions, keystore: BareCryptoStorePtr, @@ -76,18 +75,11 @@ impl Author { } } -impl AuthorApi, BlockHash

> - for Author::Block, RA> -where - B: sc_client_api::backend::Backend<

::Block> + Send + Sync + 'static, - E: sc_client::CallExecutor<

::Block> + Send + Sync + 'static, - P: TransactionPool + Sync + Send + 'static, - P::Block: traits::Block, - P::Error: 'static, - RA: Send + Sync + 'static, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - SessionKeys, +impl AuthorApi, BlockHash

> for Author + where + P: TransactionPool + Sync + Send + 'static, + Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, + Client::Api: SessionKeys, { type Metadata = crate::metadata::Metadata; @@ -105,7 +97,7 @@ where } fn rotate_keys(&self) -> Result { - let best_block_hash = self.client.chain_info().best_hash; + let best_block_hash = self.client.info().best_hash; self.client.runtime_api().generate_session_keys( &generic::BlockId::Hash(best_block_hash), None, @@ -113,7 +105,7 @@ where } fn has_session_keys(&self, session_keys: Bytes) -> Result { - let best_block_hash = self.client.chain_info().best_hash; + let best_block_hash = self.client.info().best_hash; let keys = self.client.runtime_api().decode_session_keys( &generic::BlockId::Hash(best_block_hash), session_keys.to_vec(), @@ -133,7 +125,7 @@ where Ok(xt) => xt, Err(err) => return Box::new(result(Err(err.into()))), }; - let best_block_hash = self.client.chain_info().best_hash; + let best_block_hash = self.client.info().best_hash; Box::new(self.pool .submit_one(&generic::BlockId::hash(best_block_hash), xt) .compat() @@ -176,7 +168,7 @@ where xt: Bytes, ) { let submit = || -> Result<_> { - let best_block_hash = self.client.chain_info().best_hash; + let best_block_hash = self.client.info().best_hash; let dxt = TransactionFor::
<P>
::decode(&mut &xt[..]) .map_err(error::Error::from)?; Ok( diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index 41bfc46d388bd5e64b3efc63f5645394916a1db1..3093cd9d3b759b08d887eb10165e26f697a9a073 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -25,8 +25,8 @@ use sp_core::{ }; use rpc::futures::Stream as _; use substrate_test_runtime_client::{ - self, AccountKeyring, runtime::{Extrinsic, Transfer, SessionKeys, RuntimeApi, Block}, - DefaultTestClientBuilderExt, TestClientBuilderExt, Backend, Client, Executor, + self, AccountKeyring, runtime::{Extrinsic, Transfer, SessionKeys, Block}, + DefaultTestClientBuilderExt, TestClientBuilderExt, Backend, Client, }; use sc_transaction_pool::{BasicPool, FullChainApi}; use tokio::runtime; @@ -75,7 +75,7 @@ impl Default for TestSetup { } impl TestSetup { - fn author(&self) -> Author { + fn author(&self) -> Author> { Author { client: self.client.clone(), pool: self.pool.clone(), diff --git a/client/rpc/src/chain/chain_full.rs b/client/rpc/src/chain/chain_full.rs index ff732368fe9d219f42cbe83ab94ebe47a14c2da3..ea562d47748c50aeb6e1ba110773fc568ce84c25 100644 --- a/client/rpc/src/chain/chain_full.rs +++ b/client/rpc/src/chain/chain_full.rs @@ -20,37 +20,39 @@ use std::sync::Arc; use rpc::futures::future::result; use sc_rpc_api::Subscriptions; -use sc_client_api::{CallExecutor, backend::Backend}; -use sc_client::Client; +use sc_client_api::{BlockchainEvents, BlockBody}; use sp_runtime::{generic::{BlockId, SignedBlock}, traits::{Block as BlockT}}; use super::{ChainBackend, client_err, error::FutureResult}; +use std::marker::PhantomData; +use sp_blockchain::HeaderBackend; /// Blockchain API backend for full nodes. Reads all the data from local database. -pub struct FullChain { +pub struct FullChain { /// Substrate client. - client: Arc>, + client: Arc, /// Current subscriptions. subscriptions: Subscriptions, + /// phantom member to pin the block type + _phantom: PhantomData, } -impl FullChain { +impl FullChain { /// Create new Chain API RPC handler. 
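The RPC refactor running through `author/mod.rs`, `chain_full.rs` and `chain_light.rs` replaces the concrete `Client<B, E, Block, RA>` parameter with the narrowest traits each handler actually uses (`HeaderBackend`, `BlockchainEvents`, `BlockBody`, `ProvideRuntimeApi`), and `chain_info()` calls become `info()` from `HeaderBackend`. A sketch of the resulting bound style (an illustrative helper, not taken from the diff):

    use sp_blockchain::HeaderBackend;
    use sp_runtime::traits::Block as BlockT;

    /// Resolving "the given hash, or the current best" only needs `HeaderBackend`,
    /// so any client-like type providing it can back the RPC handler.
    fn unwrap_or_best<Block, Client>(client: &Client, hash: Option<Block::Hash>) -> Block::Hash
    where
        Block: BlockT,
        Client: HeaderBackend<Block>,
    {
        hash.unwrap_or_else(|| client.info().best_hash)
    }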
- pub fn new(client: Arc>, subscriptions: Subscriptions) -> Self { + pub fn new(client: Arc, subscriptions: Subscriptions) -> Self { Self { client, subscriptions, + _phantom: PhantomData, } } } -impl ChainBackend for FullChain where +impl ChainBackend for FullChain where Block: BlockT + 'static, - B: Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static, - RA: Send + Sync + 'static, + Client: BlockBody + HeaderBackend + BlockchainEvents + 'static, { - fn client(&self) -> &Arc> { + fn client(&self) -> &Arc { &self.client } @@ -60,7 +62,7 @@ impl ChainBackend for FullChain) -> FutureResult> { Box::new(result(self.client - .header(&BlockId::Hash(self.unwrap_or_best(hash))) + .header(BlockId::Hash(self.unwrap_or_best(hash))) .map_err(client_err) )) } diff --git a/client/rpc/src/chain/chain_light.rs b/client/rpc/src/chain/chain_light.rs index 3e26bd24bb090be405614d28c90359001a59758a..b258c8dd3bc2561f65e10c6045cb7d2c1762840e 100644 --- a/client/rpc/src/chain/chain_light.rs +++ b/client/rpc/src/chain/chain_light.rs @@ -22,7 +22,7 @@ use rpc::futures::future::{result, Future, Either}; use sc_rpc_api::Subscriptions; use sc_client::{ - Client, light::{fetcher::{Fetcher, RemoteBodyRequest}, blockchain::RemoteBlockchain}, + light::{fetcher::{Fetcher, RemoteBodyRequest}, blockchain::RemoteBlockchain}, }; use sp_runtime::{ generic::{BlockId, SignedBlock}, @@ -30,12 +30,14 @@ use sp_runtime::{ }; use super::{ChainBackend, client_err, error::FutureResult}; +use sp_blockchain::HeaderBackend; +use sc_client_api::BlockchainEvents; /// Blockchain API backend for light nodes. Reads all the data from local /// database, if available, or fetches it from remote node otherwise. -pub struct LightChain { +pub struct LightChain { /// Substrate client. - client: Arc>, + client: Arc, /// Current subscriptions. subscriptions: Subscriptions, /// Remote blockchain reference @@ -44,10 +46,10 @@ pub struct LightChain { fetcher: Arc, } -impl> LightChain { +impl> LightChain { /// Create new Chain API RPC handler. 
pub fn new( - client: Arc>, + client: Arc, subscriptions: Subscriptions, remote_blockchain: Arc>, fetcher: Arc, @@ -61,14 +63,12 @@ impl> LightChain } } -impl ChainBackend for LightChain where +impl ChainBackend for LightChain where Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: sc_client::CallExecutor + Send + Sync + 'static, - RA: Send + Sync + 'static, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, F: Fetcher + Send + Sync + 'static, { - fn client(&self) -> &Arc> { + fn client(&self) -> &Arc { &self.client } diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index a2971983c793f18f9858b53211f0525cd977fd87..e7a927e780627b5d89eb76bc869b39ea760f43c3 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -32,7 +32,7 @@ use rpc::{ use sc_rpc_api::Subscriptions; use sc_client::{ - self, Client, BlockchainEvents, + self, BlockchainEvents, light::{fetcher::Fetcher, blockchain::RemoteBlockchain}, }; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; @@ -45,16 +45,17 @@ use sp_runtime::{ use self::error::{Result, Error, FutureResult}; pub use sc_rpc_api::chain::*; +use sp_blockchain::HeaderBackend; +use sc_client_api::BlockBody; /// Blockchain backend API -trait ChainBackend: Send + Sync + 'static +trait ChainBackend: Send + Sync + 'static where Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: sc_client::CallExecutor + Send + Sync + 'static, + Client: HeaderBackend + BlockchainEvents + 'static, { /// Get client reference. - fn client(&self) -> &Arc>; + fn client(&self) -> &Arc; /// Get subscriptions reference. fn subscriptions(&self) -> &Subscriptions; @@ -62,7 +63,7 @@ trait ChainBackend: Send + Sync + 'static /// Tries to unwrap passed block hash, or uses best block hash otherwise. fn unwrap_or_best(&self, hash: Option) -> Block::Hash { match hash.into() { - None => self.client().chain_info().best_hash, + None => self.client().info().best_hash, Some(hash) => hash, } } @@ -81,9 +82,9 @@ trait ChainBackend: Send + Sync + 'static number: Option>>, ) -> Result> { Ok(match number { - None => Some(self.client().chain_info().best_hash), + None => Some(self.client().info().best_hash), Some(num_or_hex) => self.client() - .header(&BlockId::number(num_or_hex.to_number()?)) + .header(BlockId::number(num_or_hex.to_number()?)) .map_err(client_err)? .map(|h| h.hash()), }) @@ -91,10 +92,36 @@ trait ChainBackend: Send + Sync + 'static /// Get hash of the last finalized block in the canon chain. fn finalized_head(&self) -> Result { - Ok(self.client().chain_info().finalized_hash) + Ok(self.client().info().finalized_hash) } - /// New head subscription + /// All new head subscription + fn subscribe_all_heads( + &self, + _metadata: crate::metadata::Metadata, + subscriber: Subscriber, + ) { + subscribe_headers( + self.client(), + self.subscriptions(), + subscriber, + || self.client().info().best_hash, + || self.client().import_notification_stream() + .map(|notification| Ok::<_, ()>(notification.header)) + .compat(), + ) + } + + /// Unsubscribe from all head subscription. 
+ fn unsubscribe_all_heads( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions().cancel(id)) + } + + /// New best head subscription fn subscribe_new_heads( &self, _metadata: crate::metadata::Metadata, @@ -104,7 +131,7 @@ trait ChainBackend: Send + Sync + 'static self.client(), self.subscriptions(), subscriber, - || self.client().chain_info().best_hash, + || self.client().info().best_hash, || self.client().import_notification_stream() .filter(|notification| future::ready(notification.is_new_best)) .map(|notification| Ok::<_, ()>(notification.header)) @@ -112,7 +139,7 @@ trait ChainBackend: Send + Sync + 'static ) } - /// Unsubscribe from new head subscription. + /// Unsubscribe from new best head subscription. fn unsubscribe_new_heads( &self, _metadata: Option, @@ -121,7 +148,7 @@ trait ChainBackend: Send + Sync + 'static Ok(self.subscriptions().cancel(id)) } - /// New head subscription + /// Finalized head subscription fn subscribe_finalized_heads( &self, _metadata: crate::metadata::Metadata, @@ -131,14 +158,14 @@ trait ChainBackend: Send + Sync + 'static self.client(), self.subscriptions(), subscriber, - || self.client().chain_info().finalized_hash, + || self.client().info().finalized_hash, || self.client().finality_notification_stream() .map(|notification| Ok::<_, ()>(notification.header)) .compat(), ) } - /// Unsubscribe from new head subscription. + /// Unsubscribe from finalized head subscription. fn unsubscribe_finalized_heads( &self, _metadata: Option, @@ -149,15 +176,13 @@ trait ChainBackend: Send + Sync + 'static } /// Create new state API that works on full node. -pub fn new_full( - client: Arc>, +pub fn new_full( + client: Arc, subscriptions: Subscriptions, -) -> Chain +) -> Chain where Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: sc_client::CallExecutor + Send + Sync + 'static + Clone, - RA: Send + Sync + 'static, + Client: BlockBody + HeaderBackend + BlockchainEvents + 'static, { Chain { backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)), @@ -165,17 +190,15 @@ pub fn new_full( } /// Create new state API that works on light node. -pub fn new_light>( - client: Arc>, +pub fn new_light>( + client: Arc, subscriptions: Subscriptions, remote_blockchain: Arc>, fetcher: Arc, -) -> Chain +) -> Chain where Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: sc_client::CallExecutor + Send + Sync + 'static + Clone, - RA: Send + Sync + 'static, + Client: BlockBody + HeaderBackend + BlockchainEvents + 'static, F: Send + Sync + 'static, { Chain { @@ -189,15 +212,15 @@ pub fn new_light>( } /// Chain API with subscriptions support. 
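[Editor's note] The new subscription pair is exercised by the tests further down; condensed, the call shape is (here `executor` and `subscriber` are whatever the caller already has, exactly as in those tests):

    let client = Arc::new(substrate_test_runtime_client::new());
    let api = new_full(client.clone(), Subscriptions::new(Arc::new(executor)));
    // `subscribe_all_heads` reports every imported header; `subscribe_new_heads` keeps its
    // old behaviour and only reports headers that become the new best block.
    api.subscribe_all_heads(Default::default(), subscriber);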
-pub struct Chain { - backend: Box>, +pub struct Chain { + backend: Box>, } -impl ChainApi, Block::Hash, Block::Header, SignedBlock> for Chain where - Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: sc_client::CallExecutor + Send + Sync + 'static, - RA: Send + Sync + 'static +impl ChainApi, Block::Hash, Block::Header, SignedBlock> for + Chain + where + Block: BlockT + 'static, + Client: HeaderBackend + BlockchainEvents + 'static, { type Metadata = crate::metadata::Metadata; @@ -229,6 +252,14 @@ impl ChainApi, Block::Hash, Block::Header, Sig self.backend.finalized_head() } + fn subscribe_all_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { + self.backend.subscribe_all_heads(metadata, subscriber) + } + + fn unsubscribe_all_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { + self.backend.unsubscribe_all_heads(metadata, id) + } + fn subscribe_new_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { self.backend.subscribe_new_heads(metadata, subscriber) } @@ -247,16 +278,15 @@ impl ChainApi, Block::Hash, Block::Header, Sig } /// Subscribe to new headers. -fn subscribe_headers( - client: &Arc>, +fn subscribe_headers( + client: &Arc, subscriptions: &Subscriptions, subscriber: Subscriber, best_block_hash: G, stream: F, ) where Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: sc_client::CallExecutor + Send + Sync + 'static, + Client: HeaderBackend + 'static, F: FnOnce() -> S, G: FnOnce() -> Block::Hash, ERR: ::std::fmt::Debug, @@ -264,7 +294,7 @@ fn subscribe_headers( { subscriptions.add(subscriber, |sink| { // send current head right at the start. - let header = client.header(&BlockId::Hash(best_block_hash())) + let header = client.header(BlockId::Hash(best_block_hash())) .map_err(client_err) .and_then(|header| { header.ok_or_else(|| "Best header missing.".to_owned().into()) diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index eb05639018781d3bd7a320d92219f48a30372877..02e4d2f16337bad25bf2ff29629ce2c3a876c612 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -195,6 +195,35 @@ fn should_notify_about_latest_block() { let remote = core.executor(); let (subscriber, id, transport) = Subscriber::new_test("test"); + { + let mut client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + api.subscribe_all_heads(Default::default(), subscriber); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let block = client.new_block(Default::default()).unwrap().build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + } + + // assert initial head sent. 
+ let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // assert notification sent to transport + let (notification, next) = core.block_on(next.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); +} + +#[test] +fn should_notify_about_best_block() { + let mut core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + { let mut client = Arc::new(substrate_test_runtime_client::new()); let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 8f621cc8afc9667fed9c15d79db7d9ffa69248cd..82568866ee3ba4df6686d9201783f7cf8cfa673e 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -27,26 +27,26 @@ use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use rpc::{Result as RpcResult, futures::{Future, future::result}}; use sc_rpc_api::Subscriptions; -use sc_client::{Client, CallExecutor, light::{blockchain::RemoteBlockchain, fetcher::Fetcher}}; +use sc_client::{light::{blockchain::RemoteBlockchain, fetcher::Fetcher}}; use sp_core::{Bytes, storage::{StorageKey, StorageData, StorageChangeSet}}; use sp_version::RuntimeVersion; use sp_runtime::traits::Block as BlockT; -use sp_api::{Metadata, ProvideRuntimeApi}; +use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; use self::error::{Error, FutureResult}; pub use sc_rpc_api::state::*; +use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend}; +use sp_blockchain::{HeaderMetadata, HeaderBackend}; const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000; /// State backend API. -pub trait StateBackend: Send + Sync + 'static +pub trait StateBackend: Send + Sync + 'static where Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: sc_client::CallExecutor + Send + Sync + 'static, - RA: Send + Sync + 'static, + Client: Send + Sync + 'static, { /// Call runtime method at given block. fn call( @@ -194,18 +194,18 @@ pub trait StateBackend: Send + Sync + 'static } /// Create new state API that works on full node. -pub fn new_full( - client: Arc>, +pub fn new_full( + client: Arc, subscriptions: Subscriptions, -) -> State +) -> State where Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static + Clone, - RA: Send + Sync + 'static, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - Metadata, + BE: Backend + 'static, + Client: ExecutorProvider + StorageProvider + HeaderBackend + + HeaderMetadata + BlockchainEvents + + CallApiAt + + ProvideRuntimeApi + Send + Sync + 'static, + Client::Api: Metadata, { State { backend: Box::new(self::state_full::FullState::new(client, subscriptions)), @@ -213,17 +213,19 @@ pub fn new_full( } /// Create new state API that works on light node. 
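[Editor's note] With the concrete client generics gone from both RPC modules, the constructors are called the same way regardless of backend or executor types; capability checking moves entirely into the trait bounds above. A sketch, assuming `client` and `subscriptions` as prepared by the service builder:

    let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone());
    let state = sc_rpc::state::new_full(client.clone(), subscriptions.clone());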
-pub fn new_light>( - client: Arc>, +pub fn new_light>( + client: Arc, subscriptions: Subscriptions, remote_blockchain: Arc>, fetcher: Arc, -) -> State +) -> State where Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static + Clone, - RA: Send + Sync + 'static, + BE: Backend + 'static, + Client: ExecutorProvider + StorageProvider + + HeaderMetadata + + ProvideRuntimeApi + HeaderBackend + BlockchainEvents + + Send + Sync + 'static, F: Send + Sync + 'static, { State { @@ -237,16 +239,14 @@ pub fn new_light>( } /// State API with subscriptions support. -pub struct State { - backend: Box>, +pub struct State { + backend: Box>, } -impl StateApi for State +impl StateApi for State where Block: BlockT + 'static, - B: sc_client_api::backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static + Clone, - RA: Send + Sync + 'static, + Client: Send + Sync + 'static, { type Metadata = crate::metadata::Metadata; diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index 3d5613626e0447f28b4ced86a2f3138daed5737c..b7589d2aefecafc9daa41e2122461c21f53c93d2 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -26,12 +26,8 @@ use rpc::{Result as RpcResult, futures::{stream, Future, Sink, Stream, future::r use sc_rpc_api::Subscriptions; use sc_client_api::backend::Backend; -use sp_blockchain::{ - Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata -}; -use sc_client::{ - Client, CallExecutor, BlockchainEvents -}; +use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata, HeaderBackend}; +use sc_client::BlockchainEvents; use sp_core::{ Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, }; @@ -40,9 +36,11 @@ use sp_runtime::{ generic::BlockId, traits::{Block as BlockT, NumberFor, SaturatedConversion}, }; -use sp_api::{Metadata, ProvideRuntimeApi}; +use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; use super::{StateBackend, error::{FutureResult, Error, Result}, client_err, child_resolution_error}; +use std::marker::PhantomData; +use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider}; /// Ranges to query in state_queryStorage. struct QueryStorageRange { @@ -59,25 +57,27 @@ struct QueryStorageRange { } /// State API backend for full nodes. -pub struct FullState { - client: Arc>, +pub struct FullState { + client: Arc, subscriptions: Subscriptions, + _phantom: PhantomData<(BE, Block)> } -impl FullState +impl FullState where + BE: Backend, + Client: StorageProvider + HeaderBackend + + HeaderMetadata, Block: BlockT + 'static, - B: Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static + Clone, { /// Create new state API backend for full nodes. - pub fn new(client: Arc>, subscriptions: Subscriptions) -> Self { - Self { client, subscriptions } + pub fn new(client: Arc, subscriptions: Subscriptions) -> Self { + Self { client, subscriptions, _phantom: PhantomData } } /// Returns given block hash or best block hash if None is passed. fn block_or_best(&self, hash: Option) -> ClientResult { - Ok(hash.unwrap_or_else(|| self.client.chain_info().best_hash)) + Ok(hash.unwrap_or_else(|| self.client.info().best_hash)) } /// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges. 
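[Editor's note] `FullState` keeps `BE` and `Block` as type parameters even though neither appears in a field any more, hence the zero-sized `PhantomData` marker. The pattern in isolation, with illustrative names:

    use std::{marker::PhantomData, sync::Arc};

    // `BE` and `Block` are only used in trait bounds elsewhere, so the struct records them
    // in a zero-sized marker instead of storing any value of those types.
    struct FullStateLike<BE, Block, Client> {
        client: Arc<Client>,
        _phantom: PhantomData<(BE, Block)>,
    }

    impl<BE, Block, Client> FullStateLike<BE, Block, Client> {
        fn new(client: Arc<Client>) -> Self {
            Self { client, _phantom: PhantomData }
        }
    }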
@@ -212,14 +212,14 @@ impl FullState } } -impl StateBackend for FullState where +impl StateBackend for FullState where Block: BlockT + 'static, - B: Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static + Clone, - RA: Send + Sync + 'static, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - Metadata, + BE: Backend + 'static, + Client: ExecutorProvider + StorageProvider + HeaderBackend + + HeaderMetadata + BlockchainEvents + + CallApiAt + ProvideRuntimeApi + + Send + Sync + 'static, + Client::Api: Metadata, { fn call( &self, @@ -424,7 +424,7 @@ impl StateBackend for FullState StateBackend for FullState>; /// State API backend for light nodes. -pub struct LightState, B, E, RA> { - client: Arc>, +pub struct LightState, Client> { + client: Arc, subscriptions: Subscriptions, version_subscriptions: SimpleSubscriptions, storage_subscriptions: Arc>>, @@ -134,16 +133,14 @@ impl SharedRequests for SimpleSubscriptions where } } -impl + 'static, B, E, RA> LightState +impl + 'static, Client> LightState where Block: BlockT, - B: Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static + Clone, - RA: Send + Sync + 'static, + Client: HeaderBackend + Send + Sync + 'static, { /// Create new state API backend for light nodes. pub fn new( - client: Arc>, + client: Arc, subscriptions: Subscriptions, remote_blockchain: Arc>, fetcher: Arc, @@ -164,16 +161,14 @@ impl + 'static, B, E, RA> LightState) -> Block::Hash { - hash.unwrap_or_else(|| self.client.chain_info().best_hash) + hash.unwrap_or_else(|| self.client.info().best_hash) } } -impl StateBackend for LightState +impl StateBackend for LightState where Block: BlockT, - B: Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static + Clone, - RA: Send + Sync + 'static, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, F: Fetcher + 'static { fn call( @@ -241,7 +236,7 @@ impl StateBackend for LightState::hash(&storage.0)))) + result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) ) ) } @@ -302,7 +297,7 @@ impl StateBackend for LightState::hash(&storage.0)))) + result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) ) ) } diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index a280110093b0bbf99c0e0a76fdd0e2e0f35aad8b..4487566e44cfd9fc18b41112d9ec66555c728196 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -69,7 +69,7 @@ fn api>>(sync: T) -> System { let _ = sender.send(peers); } Request::NetworkState(sender) => { - let _ = sender.send(serde_json::to_value(&sc_network::NetworkState { + let _ = sender.send(serde_json::to_value(&sc_network::network_state::NetworkState { peer_id: String::new(), listened_addresses: Default::default(), external_addresses: Default::default(), @@ -223,8 +223,8 @@ fn system_peers() { fn system_network_state() { let res = wait_receiver(api(None).system_network_state()); assert_eq!( - serde_json::from_value::(res).unwrap(), - sc_network::NetworkState { + serde_json::from_value::(res).unwrap(), + sc_network::network_state::NetworkState { peer_id: String::new(), listened_addresses: Default::default(), external_addresses: Default::default(), diff --git a/client/service/Cargo.toml b/client/service/Cargo.toml index 1e781aa695634c239cf24acf7f7cddf789ad9f27..bfc08eb705aa4454c4f8e3e5ee82d0d31d46864b 100644 --- a/client/service/Cargo.toml +++ b/client/service/Cargo.toml @@ -1,9 +1,12 @@ [package] name = "sc-service" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity 
Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate service. Starts a thread that spins up the network, client, and extrinsic pool. Manages communication between them." [features] default = ["rocksdb"] @@ -30,37 +33,36 @@ serde = "1.0.101" serde_json = "1.0.41" sysinfo = "0.9.5" target_info = "0.1.0" -sc-keystore = { version = "2.0.0", path = "../keystore" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-session = { version = "2.0.0", path = "../../primitives/session" } -sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } -sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -sc-network = { version = "0.8", path = "../network" } -sc-chain-spec = { version = "2.0.0", path = "../chain-spec" } -sc-client-api = { version = "2.0.0", path = "../api" } -sc-client = { version = "0.8", path = "../" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sc-client-db = { version = "0.8", path = "../db" } -codec = { package = "parity-scale-codec", version = "1.0.0" } -sc-executor = { version = "0.8", path = "../executor" } -sc-transaction-pool = { version = "2.0.0", path = "../transaction-pool" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sc-rpc-server = { version = "2.0.0", path = "../rpc-servers" } -sc-rpc = { version = "2.0.0", path = "../rpc" } -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -sc-offchain = { version = "2.0.0", path = "../offchain" } -parity-multiaddr = { package = "parity-multiaddr", version = "0.7.1" } -grafana-data-source = { version = "0.8", path = "../../utils/grafana-data-source" } -sc-tracing = { version = "2.0.0", path = "../tracing" } +sc-keystore = { version = "2.0.0-alpha.2", path = "../keystore" } +sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-session = { version = "2.0.0-alpha.2", path = "../../primitives/session" } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../primitives/application-crypto" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } +sc-network = { version = "0.8.0-alpha.2", path = "../network" } +sc-chain-spec = { version = "2.0.0-alpha.2", path = "../chain-spec" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sc-client = { version = "0.8.0-alpha.2", path = "../" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +sc-client-db = { version = "0.8.0-alpha.2", path = "../db" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sc-executor = { version = "0.8.0-alpha.2", path = "../executor" } +sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../transaction-pool" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } +sc-rpc-server = { version = "2.0.0-alpha.2", path = "../rpc-servers" } +sc-rpc = { version = "2.0.0-alpha.2", path = "../rpc" } +sc-telemetry = 
{ version = "2.0.0-alpha.2", path = "../telemetry" } +sc-offchain = { version = "2.0.0-alpha.2", path = "../offchain" } +parity-multiaddr = { package = "parity-multiaddr", version = "0.5.0" } +prometheus-endpoint = { package = "substrate-prometheus-endpoint", path = "../../utils/prometheus" , version = "0.8.0-alpha.2"} +sc-tracing = { version = "2.0.0-alpha.2", path = "../tracing" } tracing = "0.1.10" parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } -sp-consensus-babe = { version = "0.8", path = "../../primitives/consensus/babe" } -grandpa = { version = "0.8", package = "sc-finality-grandpa", path = "../finality-grandpa" } -grandpa-primitives = { version = "2.0.0", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } -tokio = { version = "0.2", features = ["rt-core"] } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } +sp-consensus-babe = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/babe" } +grandpa = { version = "0.8.0-alpha.2", package = "sc-finality-grandpa", path = "../finality-grandpa" } +grandpa-primitives = { version = "2.0.0-alpha.2", package = "sp-finality-grandpa", path = "../../primitives/finality-grandpa" } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index c67551afa35564b7f91d39ac2317f1361f2496d4..096f492e6468b32a5c29a122c4caaf4e66833d22 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -15,14 +15,15 @@ // along with Substrate. If not, see . use crate::{Service, NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm}; -use crate::{SpawnTaskHandle, start_rpc_servers, build_network_future, TransactionPoolAdapter}; +use crate::{TaskManagerBuilder, start_rpc_servers, build_network_future, TransactionPoolAdapter}; use crate::status_sinks; -use crate::config::{Configuration, DatabaseConfig, KeystoreConfig}; +use crate::config::{Configuration, DatabaseConfig, KeystoreConfig, PrometheusConfig}; use sc_client_api::{ self, BlockchainEvents, backend::RemoteBackend, light::RemoteBlockchain, execution_extensions::ExtensionsFactory, + ExecutorProvider, CallExecutor }; use sc_client::Client; use sc_chain_spec::{RuntimeGenesis, Extension}; @@ -30,21 +31,20 @@ use sp_consensus::import_queue::ImportQueue; use futures::{ Future, FutureExt, StreamExt, channel::mpsc, - future::{select, ready} + future::ready, }; use sc_keystore::{Store as Keystore}; use log::{info, warn, error}; -use sc_network::{FinalityProofProvider, OnDemand, NetworkService, NetworkStateInfo}; -use sc_network::{config::BoxFinalityProofRequestBuilder, specialization::NetworkSpecialization}; +use sc_network::config::{FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; +use sc_network::{NetworkService, NetworkStateInfo}; use parking_lot::{Mutex, RwLock}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, NumberFor, SaturatedConversion, HasherFor, + Block as BlockT, NumberFor, SaturatedConversion, HashFor, UniqueSaturatedInto, }; use sp_api::ProvideRuntimeApi; use sc_executor::{NativeExecutor, NativeExecutionDispatch}; use std::{ - borrow::Cow, io::{Read, Write, Seek}, marker::PhantomData, sync::Arc, pin::Pin }; @@ -53,7 +53,44 @@ use sysinfo::{get_current_pid, ProcessExt, System, SystemExt}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use 
sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; use sp_blockchain; -use grafana_data_source::{self, record_metrics}; +use prometheus_endpoint::{register, Gauge, U64, F64, Registry, PrometheusError, Opts, GaugeVec}; + +struct ServiceMetrics { + block_height_number: GaugeVec, + ready_transactions_number: Gauge, + memory_usage_bytes: Gauge, + cpu_usage_percentage: Gauge, + network_per_sec_bytes: GaugeVec, + node_roles: Gauge, +} + +impl ServiceMetrics { + fn register(registry: &Registry) -> Result { + Ok(Self { + block_height_number: register(GaugeVec::new( + Opts::new("block_height_number", "Height of the chain"), + &["status"] + )?, registry)?, + ready_transactions_number: register(Gauge::new( + "ready_transactions_number", "Number of transactions in the ready queue", + )?, registry)?, + memory_usage_bytes: register(Gauge::new( + "memory_usage_bytes", "Node memory usage", + )?, registry)?, + cpu_usage_percentage: register(Gauge::new( + "cpu_usage_percentage", "Node CPU usage", + )?, registry)?, + network_per_sec_bytes: register(GaugeVec::new( + Opts::new("network_per_sec_bytes", "Networking bytes per second"), + &["direction"] + )?, registry)?, + node_roles: register(Gauge::new( + "node_roles", "The roles the node is running as", + )?, registry)?, + + }) + } +} pub type BackgroundTask = Pin + Send>>; @@ -66,7 +103,6 @@ pub type BackgroundTask = Pin + Send>>; /// /// - [`with_select_chain`](ServiceBuilder::with_select_chain) /// - [`with_import_queue`](ServiceBuilder::with_import_queue) -/// - [`with_network_protocol`](ServiceBuilder::with_network_protocol) /// - [`with_finality_proof_provider`](ServiceBuilder::with_finality_proof_provider) /// - [`with_transaction_pool`](ServiceBuilder::with_transaction_pool) /// @@ -76,18 +112,18 @@ pub type BackgroundTask = Pin + Send>>; /// generics is done when you call `build`. /// pub struct ServiceBuilder + TExPool, TRpc, Backend> { config: Configuration, pub (crate) client: Arc, backend: Arc, + tasks_builder: TaskManagerBuilder, keystore: Arc>, fetcher: Option, select_chain: Option, pub (crate) import_queue: TImpQu, finality_proof_request_builder: Option, finality_proof_provider: Option, - network_protocol: TNetP, transaction_pool: Arc, rpc_extensions: TRpc, remote_backend: Option>>, @@ -123,19 +159,19 @@ pub type TLightClient = Client< /// Light client backend type. pub type TLightBackend = sc_client::light::backend::Backend< sc_client_db::light::LightStorage, - HasherFor, + HashFor, >; /// Light call executor type. pub type TLightCallExecutor = sc_client::light::call_executor::GenesisCallExecutor< sc_client::light::backend::Backend< sc_client_db::light::LightStorage, - HasherFor + HashFor >, sc_client::LocalCallExecutor< sc_client::light::backend::Backend< sc_client_db::light::LightStorage, - HasherFor + HashFor >, NativeExecutor >, @@ -145,6 +181,7 @@ type TFullParts = ( TFullClient, Arc>, Arc>, + TaskManagerBuilder, ); /// Creates a new full client for the given config. @@ -176,6 +213,8 @@ fn new_full_parts( KeystoreConfig::None => return Err("No keystore config provided!".into()), }; + let tasks_builder = TaskManagerBuilder::new(); + let executor = NativeExecutor::::new( config.wasm_method, config.default_heap_pages, @@ -223,13 +262,14 @@ fn new_full_parts( fork_blocks, bad_blocks, extensions, + config.prometheus_config.as_ref().map(|config| config.registry.clone()), )? 
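[Editor's note] `ServiceMetrics::register` above follows the usual `prometheus_endpoint` pattern: build the collector, register it against the shared registry, keep the returned handle for updates. A small sketch of the same pattern with example metric names (not metrics added by this change):

    use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, U64};

    fn example_metrics(registry: &Registry) -> Result<(), PrometheusError> {
        // A plain gauge, registered the same way as `ready_transactions_number` above.
        let queue_len = register(Gauge::<U64>::new(
            "example_queue_length", "Items waiting in an example queue",
        )?, registry)?;
        queue_len.set(0);

        // A labelled gauge vector, mirroring `network_per_sec_bytes` above.
        let per_direction = register(GaugeVec::<U64>::new(
            Opts::new("example_per_sec_bytes", "Example bytes per second"),
            &["direction"],
        )?, registry)?;
        per_direction.with_label_values(&["download"]).set(0);

        Ok(())
    }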
}; - Ok((client, backend, keystore)) + Ok((client, backend, keystore, tasks_builder)) } -impl ServiceBuilder<(), (), TGen, TCSExt, (), (), (), (), (), (), (), (), (), ()> +impl ServiceBuilder<(), (), TGen, TCSExt, (), (), (), (), (), (), (), (), ()> where TGen: RuntimeGenesis, TCSExt: Extension { /// Start the service builder with a configuration. pub fn new_full( @@ -247,10 +287,9 @@ where TGen: RuntimeGenesis, TCSExt: Extension { Arc>, (), (), - (), TFullBackend, >, Error> { - let (client, backend, keystore) = new_full_parts(&config)?; + let (client, backend, keystore, tasks_builder) = new_full_parts(&config)?; let client = Arc::new(client); @@ -259,12 +298,12 @@ where TGen: RuntimeGenesis, TCSExt: Extension { client, backend, keystore, + tasks_builder, fetcher: None, select_chain: None, import_queue: (), finality_proof_request_builder: None, finality_proof_provider: None, - network_protocol: (), transaction_pool: Arc::new(()), rpc_extensions: Default::default(), remote_backend: None, @@ -289,9 +328,10 @@ where TGen: RuntimeGenesis, TCSExt: Extension { Arc>, (), (), - (), TLightBackend, >, Error> { + let tasks_builder = TaskManagerBuilder::new(); + let keystore = match &config.keystore { KeystoreConfig::Path { path, password } => Keystore::open( path.clone(), @@ -331,26 +371,27 @@ where TGen: RuntimeGenesis, TCSExt: Extension { executor.clone(), ), ); - let fetcher = Arc::new(sc_network::OnDemand::new(fetch_checker)); + let fetcher = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); let backend = sc_client::light::new_light_backend(light_blockchain); let remote_blockchain = backend.remote_blockchain(); let client = Arc::new(sc_client::light::new_light( backend.clone(), config.expect_chain_spec(), executor, + config.prometheus_config.as_ref().map(|config| config.registry.clone()), )?); Ok(ServiceBuilder { config, client, backend, + tasks_builder, keystore, fetcher: Some(fetcher.clone()), select_chain: None, import_queue: (), finality_proof_request_builder: None, finality_proof_provider: None, - network_protocol: (), transaction_pool: Arc::new(()), rpc_extensions: Default::default(), remote_backend: Some(remote_blockchain), @@ -360,9 +401,9 @@ where TGen: RuntimeGenesis, TCSExt: Extension { } } -impl +impl ServiceBuilder { + TExPool, TRpc, Backend> { /// Returns a reference to the client that was stored in this builder. 
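[Editor's note] Several network-facing types referenced here moved from the `sc_network` crate root into `sc_network::config`; downstream code only needs its imports adjusted, e.g.:

    // before: use sc_network::{FinalityProofProvider, OnDemand, config::BoxFinalityProofRequestBuilder};
    use sc_network::config::{FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder};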
pub fn client(&self) -> &Arc { @@ -410,20 +451,20 @@ impl, &Arc ) -> Result, Error> ) -> Result, Error> { + TExPool, TRpc, Backend>, Error> { let select_chain = select_chain_builder(&self.config, &self.backend)?; Ok(ServiceBuilder { config: self.config, client: self.client, backend: self.backend, + tasks_builder: self.tasks_builder, keystore: self.keystore, fetcher: self.fetcher, select_chain, import_queue: self.import_queue, finality_proof_request_builder: self.finality_proof_request_builder, finality_proof_provider: self.finality_proof_provider, - network_protocol: self.network_protocol, transaction_pool: self.transaction_pool, rpc_extensions: self.rpc_extensions, remote_backend: self.remote_backend, @@ -437,7 +478,7 @@ impl, &Arc) -> Result ) -> Result, Error> { + TExPool, TRpc, Backend>, Error> { self.with_opt_select_chain(|cfg, b| builder(cfg, b).map(Option::Some)) } @@ -447,7 +488,7 @@ impl, Arc, Option, Arc) -> Result ) -> Result, Error> + TExPool, TRpc, Backend>, Error> where TSc: Clone { let import_queue = builder( &self.config, @@ -460,40 +501,13 @@ impl( - self, - network_protocol_builder: impl FnOnce(&Configuration) -> Result - ) -> Result, Error> { - let network_protocol = network_protocol_builder(&self.config)?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - network_protocol, transaction_pool: self.transaction_pool, rpc_extensions: self.rpc_extensions, remote_backend: self.remote_backend, @@ -517,7 +531,6 @@ impl>, - TNetP, TExPool, TRpc, Backend, @@ -528,13 +541,13 @@ impl>, - TNetP, TExPool, TRpc, Backend, @@ -578,7 +590,7 @@ impl, ) -> Result<(UImpQu, Option), Error> ) -> Result, Error> + TExPool, TRpc, Backend>, Error> where TSc: Clone, TFchr: Clone { let (import_queue, fprb) = builder( &self.config, @@ -593,13 +605,13 @@ impl, ) -> Result<(UImpQu, UFprb), Error> ) -> Result, Error> + TExPool, TRpc, Backend>, Error> where TSc: Clone, TFchr: Clone { self.with_import_queue_and_opt_fprb(|cfg, cl, b, f, sc, tx| builder(cfg, cl, b, f, sc, tx) @@ -637,7 +649,7 @@ impl, ) -> Result<(UExPool, Option), Error> ) -> Result, Error> + UExPool, TRpc, Backend>, Error> where TSc: Clone, TFchr: Clone { let (transaction_pool, background_task) = transaction_pool_builder( self.config.transaction_pool.clone(), @@ -652,6 +664,7 @@ impl Result, ) -> Result, Error> + TExPool, URpc, Backend>, Error> where TSc: Clone, TFchr: Clone { let rpc_extensions = rpc_ext_builder(&self)?; @@ -681,13 +693,13 @@ impl, to: Option>, - json: bool + binary: bool ) -> Pin>>>; /// Performs a revert of `blocks` blocks. 
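[Editor's note] Note the renamed flag on `export_blocks`: `json: bool` becomes `binary: bool`, flipping its sense, so existing callers must invert the value they pass. A call-site sketch (`builder`, `output`, `from` and `to` are assumed from context):

    // `true`  -> SCALE-encoded binary stream (a block count followed by encoded blocks,
    //            as implemented in chain_ops.rs below)
    // `false` -> one JSON value per block, written with serde_json
    let export_fut = builder.export_blocks(output, from, to, /* binary */ false);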
@@ -733,7 +745,7 @@ pub trait ServiceBuilderCommand { ) -> Pin> + Send>>; } -impl +impl ServiceBuilder< TBl, TRtApi, @@ -745,7 +757,6 @@ ServiceBuilder< TImpQu, BoxFinalityProofRequestBuilder, Arc>, - TNetP, TExPool, TRpc, TBackend, @@ -766,7 +777,6 @@ ServiceBuilder< TExec: 'static + sc_client::CallExecutor + Send + Sync + Clone, TSc: Clone, TImpQu: 'static + ImportQueue, - TNetP: NetworkSpecialization, TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, TRpc: sc_rpc::RpcExtension + Clone, { @@ -783,18 +793,21 @@ ServiceBuilder< Client, TSc, NetworkStatus, - NetworkService::Hash>, + NetworkService::Hash>, TExPool, sc_offchain::OffchainWorkers< Client, TBackend::OffchainStorage, TBl >, - >, Error> { + >, Error> + where TExec: CallExecutor, + { let ServiceBuilder { marker: _, mut config, client, + tasks_builder, fetcher: on_demand, backend, keystore, @@ -802,7 +815,6 @@ ServiceBuilder< import_queue, finality_proof_request_builder, finality_proof_provider, - network_protocol, transaction_pool, rpc_extensions, remote_backend, @@ -815,12 +827,6 @@ ServiceBuilder< config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), )?; - let (signal, exit) = exit_future::signal(); - - // List of asynchronous tasks to spawn. We collect them, then spawn them all at once. - let (to_spawn_tx, to_spawn_rx) = - mpsc::unbounded::<(Pin + Send>>, Cow<'static, str>)>(); - // A side-channel for essential tasks to communicate shutdown. let (essential_failed_tx, essential_failed_rx) = mpsc::unbounded(); @@ -845,7 +851,7 @@ ServiceBuilder< imports_external_transactions: !config.roles.is_light(), pool: transaction_pool.clone(), client: client.clone(), - executor: SpawnTaskHandle { sender: to_spawn_tx.clone(), on_exit: exit.clone() }, + executor: tasks_builder.spawn_handle(), }); let protocol_id = { @@ -867,11 +873,9 @@ ServiceBuilder< let network_params = sc_network::config::Params { roles: config.roles, executor: { - let to_spawn_tx = to_spawn_tx.clone(); + let spawn_handle = tasks_builder.spawn_handle(); Some(Box::new(move |fut| { - if let Err(e) = to_spawn_tx.unbounded_send((fut, From::from("libp2p-node"))) { - error!("Failed to spawn libp2p background task: {:?}", e); - } + spawn_handle.spawn("libp2p-node", fut); })) }, network_config: config.network.clone(), @@ -882,8 +886,8 @@ ServiceBuilder< transaction_pool: transaction_pool_adapter.clone() as _, import_queue, protocol_id, - specialization: network_protocol, block_announce_validator, + metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()) }; let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); @@ -903,20 +907,19 @@ ServiceBuilder< _ => None, }; + let spawn_handle = tasks_builder.spawn_handle(); + // Spawn background tasks which were stacked during the // service building. 
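[Editor's note] Every place that previously pushed a boxed future into `to_spawn_tx` now goes through a `SpawnTaskHandle`, which attaches the exit signal itself. Minimal sketch of the call shape (task name and body are examples):

    let spawn_handle = tasks_builder.spawn_handle();
    spawn_handle.spawn("example-background-task", async move {
        // Any `Future<Output = ()> + Send + 'static`; the handle races it against the
        // service's exit future, so no explicit `select(on_exit, ...)` is needed here.
    });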
for (title, background_task) in background_tasks { - let _ = to_spawn_tx.unbounded_send(( - background_task, - title.into(), - )); + spawn_handle.spawn(title, background_task); } { // block notifications let txpool = Arc::downgrade(&transaction_pool); let offchain = offchain_workers.as_ref().map(Arc::downgrade); - let to_spawn_tx_ = to_spawn_tx.clone(); + let notifications_spawn_handle = tasks_builder.spawn_handle(); let network_state_info: Arc = network.clone(); let is_validator = config.roles.is_authority(); @@ -938,15 +941,14 @@ ServiceBuilder< let offchain = offchain.as_ref().and_then(|o| o.upgrade()); match offchain { Some(offchain) if is_new_best => { - let future = offchain.on_block_imported( - &header, - network_state_info.clone(), - is_validator, + notifications_spawn_handle.spawn( + "offchain-on-block", + offchain.on_block_imported( + &header, + network_state_info.clone(), + is_validator, + ), ); - let _ = to_spawn_tx_.unbounded_send(( - Box::pin(future), - From::from("offchain-on-block"), - )); }, Some(_) => log::debug!( target: "sc_offchain", @@ -959,20 +961,19 @@ ServiceBuilder< let txpool = txpool.upgrade(); if let Some(txpool) = txpool.as_ref() { - let future = txpool.maintain(event); - let _ = to_spawn_tx_.unbounded_send(( - Box::pin(future), - From::from("txpool-maintain") - )); + notifications_spawn_handle.spawn( + "txpool-maintain", + txpool.maintain(event), + ); } ready(()) }); - let _ = to_spawn_tx.unbounded_send(( - Box::pin(select(events, exit.clone()).map(drop)), - From::from("txpool-and-offchain-notif"), - )); + spawn_handle.spawn( + "txpool-and-offchain-notif", + events, + ); } { @@ -992,12 +993,24 @@ ServiceBuilder< ready(()) }); - let _ = to_spawn_tx.unbounded_send(( - Box::pin(select(events, exit.clone()).map(drop)), - From::from("telemetry-on-block"), - )); + spawn_handle.spawn( + "telemetry-on-block", + events, + ); } + // Prometheus metrics + let metrics = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { + let metrics = ServiceMetrics::register(®istry)?; + metrics.node_roles.set(u64::from(config.roles.bits())); + spawn_handle.spawn( + "prometheus-endpoint", + prometheus_endpoint::init_prometheus(port, registry).map(drop) + ); + Some(metrics) + } else { + None + }; // Periodically notify the telemetry. 
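[Editor's note] For completeness, a sketch of how an embedder would switch the endpoint on; the socket address is only an example, and `config` is assumed to be an already constructed `Configuration`:

    use sc_service::config::PrometheusConfig;

    config.prometheus_config = Some(PrometheusConfig::new_with_default_registry(
        "127.0.0.1:9615".parse().expect("valid socket address"),
    ));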
let transaction_pool_ = transaction_pool.clone(); let client_ = client.clone(); @@ -1014,6 +1027,8 @@ ServiceBuilder< let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); let bandwidth_download = net_status.average_download_per_sec; let bandwidth_upload = net_status.average_upload_per_sec; + let best_seen_block = net_status.best_seen_block + .map(|num: NumberFor| num.unique_saturated_into() as u64); // get cpu usage and memory usage of this process let (cpu_usage, memory) = if let Some(self_pid) = self_pid { @@ -1037,38 +1052,42 @@ ServiceBuilder< "finalized_hash" => ?info.chain.finalized_hash, "bandwidth_download" => bandwidth_download, "bandwidth_upload" => bandwidth_upload, - "used_state_cache_size" => info.usage.as_ref().map(|usage| usage.memory.state_cache).unwrap_or(0), - "used_db_cache_size" => info.usage.as_ref().map(|usage| usage.memory.database_cache).unwrap_or(0), - "disk_read_per_sec" => info.usage.as_ref().map(|usage| usage.io.bytes_read).unwrap_or(0), - "disk_write_per_sec" => info.usage.as_ref().map(|usage| usage.io.bytes_written).unwrap_or(0), - ); - #[cfg(not(target_os = "unknown"))] - let memory_transaction_pool = parity_util_mem::malloc_size(&*transaction_pool_); - #[cfg(target_os = "unknown")] - let memory_transaction_pool = 0; - let _ = record_metrics!( - "peers" => num_peers, - "height" => best_number, - "txcount" => txpool_status.ready, - "cpu" => cpu_usage, - "memory" => memory, - "finalized_height" => finalized_number, - "bandwidth_download" => bandwidth_download, - "bandwidth_upload" => bandwidth_upload, - "used_state_cache_size" => info.usage.as_ref().map(|usage| usage.memory.state_cache).unwrap_or(0), - "used_db_cache_size" => info.usage.as_ref().map(|usage| usage.memory.database_cache).unwrap_or(0), - "disk_read_per_sec" => info.usage.as_ref().map(|usage| usage.io.bytes_read).unwrap_or(0), - "disk_write_per_sec" => info.usage.as_ref().map(|usage| usage.io.bytes_written).unwrap_or(0), - "memory_transaction_pool" => memory_transaction_pool, + "used_state_cache_size" => info.usage.as_ref() + .map(|usage| usage.memory.state_cache.as_bytes()) + .unwrap_or(0), + "used_db_cache_size" => info.usage.as_ref() + .map(|usage| usage.memory.database_cache.as_bytes()) + .unwrap_or(0), + "disk_read_per_sec" => info.usage.as_ref() + .map(|usage| usage.io.bytes_read) + .unwrap_or(0), + "disk_write_per_sec" => info.usage.as_ref() + .map(|usage| usage.io.bytes_written) + .unwrap_or(0), ); + if let Some(metrics) = metrics.as_ref() { + metrics.memory_usage_bytes.set(memory); + metrics.cpu_usage_percentage.set(f64::from(cpu_usage)); + metrics.ready_transactions_number.set(txpool_status.ready as u64); + + metrics.network_per_sec_bytes.with_label_values(&["download"]).set(net_status.average_download_per_sec); + metrics.network_per_sec_bytes.with_label_values(&["upload"]).set(net_status.average_upload_per_sec); + + metrics.block_height_number.with_label_values(&["finalized"]).set(finalized_number); + metrics.block_height_number.with_label_values(&["best"]).set(best_number); + + if let Some(best_seen_block) = best_seen_block { + metrics.block_height_number.with_label_values(&["sync_target"]).set(best_seen_block); + } + } ready(()) }); - let _ = to_spawn_tx.unbounded_send(( - Box::pin(select(tel_task, exit.clone()).map(drop)), - From::from("telemetry-periodic-send"), - )); + spawn_handle.spawn( + "telemetry-periodic-send", + tel_task, + ); // Periodically send the network state to the telemetry. 
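[Editor's note] As a result, one scrape of the endpoint exposes the chain height as a single gauge family with a `status` label, roughly as below (values are examples; the `substrate_` prefix comes from the default registry created in `PrometheusConfig::new_with_default_registry`):

    substrate_block_height_number{status="best"} 1024
    substrate_block_height_number{status="finalized"} 1019
    substrate_block_height_number{status="sync_target"} 1042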
let (netstat_tx, netstat_rx) = mpsc::unbounded::<(NetworkStatus<_>, NetworkState)>(); @@ -1081,10 +1100,10 @@ ServiceBuilder< ); ready(()) }); - let _ = to_spawn_tx.unbounded_send(( - Box::pin(select(tel_task_2, exit.clone()).map(drop)), - From::from("telemetry-periodic-network-state"), - )); + spawn_handle.spawn( + "telemetry-periodic-network-state", + tel_task_2, + ); // RPC let (system_rpc_tx, system_rpc_rx) = mpsc::unbounded(); @@ -1098,10 +1117,7 @@ ServiceBuilder< properties: chain_spec.properties().clone(), }; - let subscriptions = sc_rpc::Subscriptions::new(Arc::new(SpawnTaskHandle { - sender: to_spawn_tx.clone(), - on_exit: exit.clone() - })); + let subscriptions = sc_rpc::Subscriptions::new(Arc::new(tasks_builder.spawn_handle())); let (chain, state) = if let (Some(remote_backend), Some(on_demand)) = (remote_backend.as_ref(), on_demand.as_ref()) { @@ -1159,18 +1175,17 @@ ServiceBuilder< let rpc_handlers = gen_handler(); let rpc = start_rpc_servers(&config, gen_handler)?; - - let _ = to_spawn_tx.unbounded_send(( - Box::pin(select(build_network_future( + spawn_handle.spawn( + "network-worker", + build_network_future( config.roles, network_mut, client.clone(), network_status_sinks.clone(), system_rpc_rx, has_bootnodes, - ), exit.clone()).map(drop)), - From::from("network-worker"), - )); + ), + ); let telemetry_connection_sinks: Arc>>> = Default::default(); @@ -1211,21 +1226,14 @@ ServiceBuilder< }); ready(()) }); - let _ = to_spawn_tx.unbounded_send((Box::pin(select( - future, exit.clone() - ).map(drop)), From::from("telemetry-worker"))); - telemetry - }); - // Grafana data source - if let Some(port) = config.grafana_port { - let future = select( - grafana_data_source::run_server(port).boxed(), - exit.clone() - ).map(drop); + spawn_handle.spawn( + "telemetry-worker", + future, + ); - let _ = to_spawn_tx.unbounded_send((Box::pin(future), From::from("grafana-server"))); - } + telemetry + }); // Instrumentation if let Some(tracing_targets) = config.tracing_targets.as_ref() { @@ -1240,21 +1248,13 @@ ServiceBuilder< Ok(Service { client, + task_manager: tasks_builder.into_task_manager(config.task_executor.ok_or(Error::TaskExecutorRequired)?), network, network_status_sinks, select_chain, transaction_pool, - exit, - signal: Some(signal), essential_failed_tx, essential_failed_rx, - to_spawn_tx, - to_spawn_rx, - task_executor: if let Some(exec) = config.task_executor { - exec - } else { - return Err(Error::TaskExecutorRequired); - }, rpc_handlers, _rpc: rpc, _telemetry: telemetry, @@ -1262,6 +1262,7 @@ ServiceBuilder< _telemetry_on_connect_sinks: telemetry_connection_sinks.clone(), keystore, marker: PhantomData::, + prometheus_registry: config.prometheus_config.map(|config| config.registry) }) } } diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs index 30987170f37ae58930ee08dec82694c2171d7633..350aac91758134ac9c809ffcba945f0b89591ada 100644 --- a/client/service/src/chain_ops.rs +++ b/client/service/src/chain_ops.rs @@ -35,8 +35,7 @@ use sp_consensus::{ use sc_executor::{NativeExecutor, NativeExecutionDispatch}; use std::{io::{Read, Write, Seek}, pin::Pin}; - -use sc_network::message; +use sc_client_api::BlockBody; /// Build a chain spec json pub fn build_spec(spec: ChainSpec, raw: bool) -> error::Result where @@ -48,12 +47,12 @@ pub fn build_spec(spec: ChainSpec, raw: bool) -> error::Result ServiceBuilderCommand for ServiceBuilder< TBl, TRtApi, TGen, TCSExt, Client>, TBl, TRtApi>, - TFchr, TSc, TImpQu, TFprb, TFpp, TNetP, TExPool, TRpc, Backend + TFchr, TSc, 
TImpQu, TFprb, TFpp, TExPool, TRpc, Backend > where TBl: BlockT, TBackend: 'static + sc_client_api::backend::Backend + Send, @@ -141,21 +140,13 @@ impl< Ok(signed) => { let (header, extrinsics) = signed.block.deconstruct(); let hash = header.hash(); - let block = message::BlockData:: { - hash, - justification: signed.justification, - header: Some(header), - body: Some(extrinsics), - receipt: None, - message_queue: None - }; // import queue handles verification and importing it into the client queue.import_blocks(BlockOrigin::File, vec![ IncomingBlock:: { - hash: block.hash, - header: block.header, - body: block.body, - justification: block.justification, + hash, + header: Some(header), + body: Some(extrinsics), + justification: signed.justification, origin: None, allow_missing_state: false, import_existing: force, @@ -213,7 +204,7 @@ impl< mut output: impl Write + 'static, from: NumberFor, to: Option>, - json: bool + binary: bool ) -> Pin>>> { let client = self.client; let mut block = from; @@ -240,7 +231,7 @@ impl< if !wrote_header { info!("Exporting blocks from #{} to #{}", block, last); - if !json { + if binary { let last_: u64 = last.saturated_into::(); let block_: u64 = block.saturated_into::(); let len: u64 = last_ - block_ + 1; @@ -251,13 +242,13 @@ impl< match client.block(&BlockId::number(block))? { Some(block) => { - if json { + if binary { + output.write_all(&block.encode())?; + } else { serde_json::to_writer(&mut output, &block) .map_err(|e| format!("Error writing JSON: {}", e))?; - } else { - output.write_all(&block.encode())?; } - }, + }, // Reached end of the chain. None => return std::task::Poll::Ready(Ok(())), } diff --git a/client/service/src/config.rs b/client/service/src/config.rs index 2099c600df15e3282774e13e9a558f734bef7b6c..6400712cad3b9ccd4b6a2b2539f45bf49b3e188a 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -27,6 +27,7 @@ use sc_chain_spec::{ChainSpec, NoExtension}; use sp_core::crypto::Protected; use target_info::Target; use sc_telemetry::TelemetryEndpoints; +use prometheus_endpoint::Registry; /// Executable version. Used to pass version information from the root crate. #[derive(Clone)] @@ -93,8 +94,8 @@ pub struct Configuration { pub rpc_ws_max_connections: Option, /// CORS settings for HTTP & WS servers. `None` if all origins are allowed. pub rpc_cors: Option>, - /// Grafana data source http port. `None` if disabled. - pub grafana_port: Option, + /// Prometheus endpoint configuration. `None` if disabled. + pub prometheus_config: Option, /// Telemetry service URL. `None` if disabled. pub telemetry_endpoints: Option, /// External WASM transport for the telemetry. If `Some`, when connection to a telemetry @@ -165,6 +166,28 @@ pub enum DatabaseConfig { Custom(Arc), } +/// Configuration of the Prometheus endpoint. +#[derive(Clone)] +pub struct PrometheusConfig { + /// Port to use. + pub port: SocketAddr, + /// A metrics registry to use. Useful for setting the metric prefix. + pub registry: Registry, +} + +impl PrometheusConfig { + /// Create a new config using the default registry. + /// + /// The default registry prefixes metrics with `substrate`. 
+ pub fn new_with_default_registry(port: SocketAddr) -> Self { + Self { + port, + registry: Registry::new_custom(Some("substrate".into()), None) + .expect("this can only fail if the prefix is empty") + } + } +} + impl Default for Configuration { /// Create a default config fn default() -> Self { @@ -190,7 +213,7 @@ impl Default for Configuration { rpc_ws: None, rpc_ws_max_connections: None, rpc_cors: Some(vec![]), - grafana_port: None, + prometheus_config: None, telemetry_endpoints: None, telemetry_external_transport: None, default_heap_pages: None, @@ -207,7 +230,7 @@ impl Default for Configuration { impl Configuration { /// Create a default config using `VersionInfo` - pub fn new(version: &VersionInfo) -> Self { + pub fn from_version(version: &VersionInfo) -> Self { let mut config = Configuration::default(); config.impl_name = version.name; config.impl_version = version.version; @@ -254,6 +277,28 @@ impl Configuration { pub fn expect_database(&self) -> &DatabaseConfig { self.database.as_ref().expect("database must be specified") } + + /// Returns a string displaying the node role, special casing the sentry mode + /// (returning `SENTRY`), since the node technically has an `AUTHORITY` role but + /// doesn't participate. + pub fn display_role(&self) -> String { + if self.sentry_mode { + "SENTRY".to_string() + } else { + self.roles.to_string() + } + } + + /// Use in memory keystore config when it is not required at all. + /// + /// This function returns an error if the keystore is already set to something different than + /// `KeystoreConfig::None`. + pub fn use_in_memory_keystore(&mut self) -> Result<(), String> { + match &mut self.keystore { + cfg @ KeystoreConfig::None => { *cfg = KeystoreConfig::InMemory; Ok(()) }, + _ => Err("Keystore config specified when it should not be!".into()), + } + } } /// Returns platform info diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 059e1c19e490d6861339e278a274f5c060382950..5a78a1878923080ee05d2c5c46568356b8a71a59 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -53,6 +53,12 @@ impl<'a> From<&'a str> for Error { } } +impl From for Error { + fn from(e: prometheus_endpoint::PrometheusError) -> Self { + Error::Other(format!("Prometheus error: {}", e)) + } +} + impl std::error::Error for Error { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match self { diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index a2e53616aba94ad39d7c096e4b67fe5ad4f90fc7..db56c141db0b5bb7c6765a2ff8f53ec8039592e1 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -26,6 +26,7 @@ pub mod error; mod builder; mod status_sinks; +mod task_manager; use std::{borrow::Cow, io, pin::Pin}; use std::marker::PhantomData; @@ -37,18 +38,14 @@ use std::task::{Poll, Context}; use parking_lot::Mutex; use sc_client::Client; -use exit_future::Signal; use futures::{ Future, FutureExt, Stream, StreamExt, - future::select, channel::mpsc, + channel::mpsc, compat::*, sink::SinkExt, task::{Spawn, FutureObj, SpawnError}, }; -use sc_network::{ - NetworkService, NetworkState, specialization::NetworkSpecialization, - PeerId, ReportHandle, -}; +use sc_network::{NetworkService, network_state::NetworkState, PeerId, ReportHandle}; use log::{log, warn, debug, error, Level}; use codec::{Encode, Decode}; use sp_runtime::generic::BlockId; @@ -71,7 +68,9 @@ pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] pub use std::{ops::Deref, result::Result, sync::Arc}; #[doc(hidden)] -pub use 
sc_network::{FinalityProofProvider, OnDemand, config::BoxFinalityProofRequestBuilder}; +pub use sc_network::config::{FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; +pub use task_manager::{TaskManagerBuilder, SpawnTaskHandle}; +use task_manager::TaskManager; const DEFAULT_PROTOCOL_ID: &str = "sup"; @@ -88,28 +87,18 @@ impl MallocSizeOfWasm for T {} /// Substrate service. pub struct Service { client: Arc, + task_manager: TaskManager, select_chain: Option, network: Arc, /// Sinks to propagate network status updates. /// For each element, every time the `Interval` fires we push an element on the sender. network_status_sinks: Arc>>, transaction_pool: Arc, - /// A future that resolves when the service has exited, this is useful to - /// make sure any internally spawned futures stop when the service does. - exit: exit_future::Exit, - /// A signal that makes the exit future above resolve, fired on service drop. - signal: Option, /// Send a signal when a spawned essential task has concluded. The next time /// the service future is polled it should complete with an error. essential_failed_tx: mpsc::UnboundedSender<()>, /// A receiver for spawned essential-tasks concluding. essential_failed_rx: mpsc::UnboundedReceiver<()>, - /// Sender for futures that must be spawned as background tasks. - to_spawn_tx: mpsc::UnboundedSender<(Pin + Send>>, Cow<'static, str>)>, - /// Receiver for futures that must be spawned as background tasks. - to_spawn_rx: mpsc::UnboundedReceiver<(Pin + Send>>, Cow<'static, str>)>, - /// How to spawn background tasks. - task_executor: Arc + Send>>) + Send + Sync>, rpc_handlers: sc_rpc_server::RpcHandler, _rpc: Box, _telemetry: Option, @@ -117,49 +106,10 @@ pub struct Service { _offchain_workers: Option>, keystore: sc_keystore::KeyStorePtr, marker: PhantomData, + prometheus_registry: Option, } -/// Alias for a an implementation of `futures::future::Executor`. -pub type TaskExecutor = Arc; - -/// An handle for spawning tasks in the service. -#[derive(Clone)] -pub struct SpawnTaskHandle { - sender: mpsc::UnboundedSender<(Pin + Send>>, Cow<'static, str>)>, - on_exit: exit_future::Exit, -} - -impl SpawnTaskHandle { - /// Spawns the given task with the given name. - pub fn spawn(&self, name: impl Into>, task: impl Future + Send + 'static) { - let on_exit = self.on_exit.clone(); - let future = async move { - futures::pin_mut!(task); - let _ = select(on_exit, task).await; - }; - if self.sender.unbounded_send((Box::pin(future), name.into())).is_err() { - error!("Failed to send task to spawn over channel"); - } - } -} - -impl Spawn for SpawnTaskHandle { - fn spawn_obj(&self, future: FutureObj<'static, ()>) - -> Result<(), SpawnError> { - let future = select(self.on_exit.clone(), future).map(drop); - self.sender.unbounded_send((Box::pin(future), From::from("unnamed"))) - .map_err(|_| SpawnError::shutdown()) - } -} - -type Boxed01Future01 = Box + Send + 'static>; - -impl futures01::future::Executor for SpawnTaskHandle { - fn execute(&self, future: Boxed01Future01) -> Result<(), futures01::future::ExecuteError>{ - self.spawn("unnamed", future.compat().map(drop)); - Ok(()) - } -} +impl Unpin for Service {} /// Abstraction over a Substrate service. pub trait AbstractService: 'static + Future> + @@ -176,8 +126,6 @@ pub trait AbstractService: 'static + Future> + type SelectChain: sp_consensus::SelectChain; /// Transaction pool. type TransactionPool: TransactionPool + MallocSizeOfWasm; - /// Network specialization. 
- type NetworkSpecialization: NetworkSpecialization; /// Get event stream for telemetry connection established events. fn telemetry_on_connect_stream(&self) -> futures::channel::mpsc::UnboundedReceiver<()>; @@ -218,7 +166,7 @@ pub trait AbstractService: 'static + Future> + /// Get shared network instance. fn network(&self) - -> Arc::Hash>>; + -> Arc::Hash>>; /// Returns a receiver that periodically receives a status of the network. fn network_status(&self, interval: Duration) -> mpsc::UnboundedReceiver<(NetworkStatus, NetworkState)>; @@ -227,21 +175,24 @@ pub trait AbstractService: 'static + Future> + fn transaction_pool(&self) -> Arc; /// Get a handle to a future that will resolve on exit. + #[deprecated(note = "Use `spawn_task`/`spawn_essential_task` instead, those functions will attach on_exit signal.")] fn on_exit(&self) -> ::exit_future::Exit; + + /// Get the prometheus metrics registry, if available. + fn prometheus_registry(&self) -> Option; } -impl AbstractService for +impl AbstractService for Service, TSc, NetworkStatus, - NetworkService, TExPool, TOc> + NetworkService, TExPool, TOc> where - TBl: BlockT + Unpin, + TBl: BlockT, TBackend: 'static + sc_client_api::backend::Backend, TExec: 'static + sc_client::CallExecutor + Send + Sync + Clone, TRtApi: 'static + Send + Sync, TSc: sp_consensus::SelectChain + 'static + Clone + Send + Unpin, TExPool: 'static + TransactionPool + MallocSizeOfWasm, TOc: 'static + Send + Sync, - TNetSpec: NetworkSpecialization, { type Block = TBl; type Backend = TBackend; @@ -249,7 +200,6 @@ where type RuntimeApi = TRtApi; type SelectChain = TSc; type TransactionPool = TExPool; - type NetworkSpecialization = TNetSpec; fn telemetry_on_connect_stream(&self) -> futures::channel::mpsc::UnboundedReceiver<()> { let (sink, stream) = futures::channel::mpsc::unbounded(); @@ -266,12 +216,7 @@ where } fn spawn_task(&self, name: impl Into>, task: impl Future + Send + 'static) { - let on_exit = self.on_exit(); - let task = async move { - futures::pin_mut!(task); - let _ = select(on_exit, task).await; - }; - let _ = self.to_spawn_tx.unbounded_send((Box::pin(task), name.into())); + self.task_manager.spawn(name, task) } fn spawn_essential_task(&self, name: impl Into>, task: impl Future + Send + 'static) { @@ -282,20 +227,12 @@ where error!("Essential task failed. 
Shutting down service."); let _ = essential_failed.send(()); }); - let on_exit = self.on_exit(); - let task = async move { - futures::pin_mut!(essential_task); - let _ = select(on_exit, essential_task).await; - }; - let _ = self.to_spawn_tx.unbounded_send((Box::pin(task), name.into())); + let _ = self.spawn_task(name, essential_task); } fn spawn_task_handle(&self) -> SpawnTaskHandle { - SpawnTaskHandle { - sender: self.to_spawn_tx.clone(), - on_exit: self.on_exit(), - } + self.task_manager.spawn_handle() } fn rpc_query(&self, mem: &RpcSession, request: &str) -> Pin> + Send>> { @@ -315,7 +252,7 @@ where } fn network(&self) - -> Arc::Hash>> + -> Arc::Hash>> { self.network.clone() } @@ -331,11 +268,15 @@ where } fn on_exit(&self) -> exit_future::Exit { - self.exit.clone() + self.task_manager.on_exit() + } + + fn prometheus_registry(&self) -> Option { + self.prometheus_registry.clone() } } -impl Future for +impl Future for Service { type Output = Result<(), Error>; @@ -352,9 +293,7 @@ impl Future for } } - while let Poll::Ready(Some((task_to_spawn, name))) = Pin::new(&mut this.to_spawn_rx).poll_next(cx) { - (this.task_executor)(Box::pin(futures_diagnose::diagnose(name, task_to_spawn))); - } + this.task_manager.process_receiver(cx); // The service future never ends. Poll::Pending @@ -368,7 +307,7 @@ impl Spawn for &self, future: FutureObj<'static, ()> ) -> Result<(), SpawnError> { - self.to_spawn_tx.unbounded_send((Box::pin(future), From::from("unnamed"))) + self.task_manager.scheduler().unbounded_send((Box::pin(future), From::from("unnamed"))) .map_err(|_| SpawnError::shutdown()) } } @@ -379,11 +318,10 @@ impl Spawn for fn build_network_future< B: BlockT, C: sc_client::BlockchainEvents, - S: sc_network::specialization::NetworkSpecialization, H: sc_network::ExHashT > ( roles: Roles, - mut network: sc_network::NetworkWorker, + mut network: sc_network::NetworkWorker, client: Arc, status_sinks: Arc, NetworkState)>>>, mut rpc_rx: mpsc::UnboundedReceiver>, @@ -397,7 +335,7 @@ fn build_network_future< // We poll `imported_blocks_stream`. while let Poll::Ready(Some(notification)) = Pin::new(&mut imported_blocks_stream).poll_next(cx) { - network.on_block_imported(notification.hash, notification.header, Vec::new(), notification.is_new_best); + network.on_block_imported(notification.header, Vec::new(), notification.is_new_best); } // We poll `finality_notification_stream`, but we only take the last event. @@ -523,13 +461,26 @@ pub struct NetworkStatus { pub average_upload_per_sec: u64, } -impl Drop for - Service -{ - fn drop(&mut self) { - debug!(target: "service", "Substrate service shutdown"); - if let Some(signal) = self.signal.take() { - let _ = signal.fire(); +#[cfg(not(target_os = "unknown"))] +// Wrapper for HTTP and WS servers that makes sure they are properly shut down. 
+mod waiting { + pub struct HttpServer(pub Option); + impl Drop for HttpServer { + fn drop(&mut self) { + if let Some(server) = self.0.take() { + server.close_handle().close(); + server.wait(); + } + } + } + + pub struct WsServer(pub Option); + impl Drop for WsServer { + fn drop(&mut self) { + if let Some(server) = self.0.take() { + server.close_handle().close(); + let _ = server.wait(); + } } } } @@ -562,7 +513,7 @@ fn start_rpc_servers sc_rpc_server::RpcHandler sc_rpc_server::RpcHandler sc_network::TransactionPool for +impl sc_network::config::TransactionPool for TransactionPoolAdapter where - C: sc_network::ClientHandle + Send + Sync, + C: sc_network::config::Client + Send + Sync, Pool: 'static + TransactionPool, B: BlockT, H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, diff --git a/client/service/src/status_sinks.rs b/client/service/src/status_sinks.rs index de5fe865736af94911deb806f30eb6f64199e613..8e189be157be5234882d234fc64ed9709a3918a7 100644 --- a/client/service/src/status_sinks.rs +++ b/client/service/src/status_sinks.rs @@ -122,28 +122,17 @@ mod tests { let (tx, rx) = mpsc::unbounded(); status_sinks.push(Duration::from_millis(100), tx); - let mut runtime = tokio::runtime::Runtime::new().unwrap(); - let mut val_order = 5; - runtime.spawn(futures::future::poll_fn(move |cx| { - status_sinks.poll(cx, || { val_order += 1; val_order }); - Poll::<()>::Pending - })); - - let done = rx - .into_future() - .then(|(item, rest)| { - assert_eq!(item, Some(6)); - rest.into_future() - }) - .then(|(item, rest)| { - assert_eq!(item, Some(7)); - rest.into_future() - }) - .map(|(item, _)| { - assert_eq!(item, Some(8)); - }); - runtime.block_on(done); + futures::executor::block_on(futures::future::select( + futures::future::poll_fn(move |cx| { + status_sinks.poll(cx, || { val_order += 1; val_order }); + Poll::<()>::Pending + }), + Box::pin(async { + let items: Vec = rx.take(3).collect().await; + assert_eq!(items, [6, 7, 8]); + }) + )); } } diff --git a/client/service/src/task_manager.rs b/client/service/src/task_manager.rs new file mode 100644 index 0000000000000000000000000000000000000000..d7041e44b9c1e17eaa2d82e0e8a80467ddcffcb3 --- /dev/null +++ b/client/service/src/task_manager.rs @@ -0,0 +1,190 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +//! Substrate service tasks management module. + +use std::{ + result::Result, sync::Arc, + task::{Poll, Context}, + borrow::Cow, pin::Pin, +}; +use exit_future::Signal; +use log::{debug, error}; +use futures::{ + Future, FutureExt, Stream, + future::select, channel::mpsc, + compat::*, + task::{Spawn, FutureObj, SpawnError}, +}; + +/// Type alias for service task executor (usually runtime). +pub type ServiceTaskExecutor = Arc + Send>>) + Send + Sync>; + +/// Type alias for the task scheduler. +pub type TaskScheduler = mpsc::UnboundedSender<(Pin + Send>>, Cow<'static, str>)>; + +/// Helper struct to setup background tasks execution for service. 
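The task manager introduced below is built from two pieces already visible in this file: an `exit_future` signal/exit pair and an unbounded channel carrying named, boxed futures to the executor. A self-contained sketch of that core pattern, assuming the `futures` 0.3 and `exit-future` crates as used elsewhere in this diff:

use std::pin::Pin;
use futures::{channel::mpsc, executor::block_on, future::select, Future, StreamExt};

type Task = Pin<Box<dyn Future<Output = ()> + Send>>;

fn main() {
    let (signal, on_exit) = exit_future::signal();
    let (tx, mut rx) = mpsc::unbounded::<(&'static str, Task)>();

    // "Spawning": wrap the task so it also finishes when `on_exit` resolves.
    let task = async { /* background work */ };
    let wrapped: Task = Box::pin(async move {
        futures::pin_mut!(task);
        let _ = select(on_exit, task).await;
    });
    let _ = tx.unbounded_send(("example", wrapped));

    // The service side drains the channel and hands futures to its executor;
    // here the exit signal is fired first, so the task stops immediately.
    let _ = signal.fire();
    if let Some((_name, task)) = block_on(rx.next()) {
        block_on(task);
    }
}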
+pub struct TaskManagerBuilder { + /// A future that resolves when the service has exited, this is useful to + /// make sure any internally spawned futures stop when the service does. + on_exit: exit_future::Exit, + /// A signal that makes the exit future above resolve, fired on service drop. + signal: Option, + /// Sender for futures that must be spawned as background tasks. + to_spawn_tx: TaskScheduler, + /// Receiver for futures that must be spawned as background tasks. + to_spawn_rx: mpsc::UnboundedReceiver<(Pin + Send>>, Cow<'static, str>)>, +} + +impl TaskManagerBuilder { + /// New asynchronous task manager setup. + pub fn new() -> Self { + let (signal, on_exit) = exit_future::signal(); + let (to_spawn_tx, to_spawn_rx) = mpsc::unbounded(); + Self { + on_exit, + signal: Some(signal), + to_spawn_tx, + to_spawn_rx, + } + } + + /// Get spawn handle. + /// + /// Tasks spawned through this handle will get scheduled once + /// service is up and running. + pub fn spawn_handle(&self) -> SpawnTaskHandle { + SpawnTaskHandle { + on_exit: self.on_exit.clone(), + sender: self.to_spawn_tx.clone(), + } + } + + /// Convert into actual task manager from initial setup. + pub(crate) fn into_task_manager(self, executor: ServiceTaskExecutor) -> TaskManager { + let TaskManagerBuilder { + on_exit, + signal, + to_spawn_rx, + to_spawn_tx + } = self; + TaskManager { + on_exit, + signal, + to_spawn_tx, + to_spawn_rx, + executor, + } + } +} + +/// An handle for spawning tasks in the service. +#[derive(Clone)] +pub struct SpawnTaskHandle { + sender: TaskScheduler, + on_exit: exit_future::Exit, +} + +impl SpawnTaskHandle { + /// Spawns the given task with the given name. + pub fn spawn(&self, name: impl Into>, task: impl Future + Send + 'static) { + let on_exit = self.on_exit.clone(); + let future = async move { + futures::pin_mut!(task); + let _ = select(on_exit, task).await; + }; + if self.sender.unbounded_send((Box::pin(future), name.into())).is_err() { + error!("Failed to send task to spawn over channel"); + } + } +} + +impl Spawn for SpawnTaskHandle { + fn spawn_obj(&self, future: FutureObj<'static, ()>) + -> Result<(), SpawnError> { + let future = select(self.on_exit.clone(), future).map(drop); + self.sender.unbounded_send((Box::pin(future), From::from("unnamed"))) + .map_err(|_| SpawnError::shutdown()) + } +} + +type Boxed01Future01 = Box + Send + 'static>; + +impl futures01::future::Executor for SpawnTaskHandle { + fn execute(&self, future: Boxed01Future01) -> Result<(), futures01::future::ExecuteError>{ + self.spawn("unnamed", future.compat().map(drop)); + Ok(()) + } +} + +/// Helper struct to manage background/async tasks in Service. +pub struct TaskManager { + /// A future that resolves when the service has exited, this is useful to + /// make sure any internally spawned futures stop when the service does. + on_exit: exit_future::Exit, + /// A signal that makes the exit future above resolve, fired on service drop. + signal: Option, + /// Sender for futures that must be spawned as background tasks. + to_spawn_tx: TaskScheduler, + /// Receiver for futures that must be spawned as background tasks. + to_spawn_rx: mpsc::UnboundedReceiver<(Pin + Send>>, Cow<'static, str>)>, + /// How to spawn background tasks. + executor: ServiceTaskExecutor, +} + +impl TaskManager { + /// Spawn background/async task, which will be aware on exit signal. 
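`SpawnTaskHandle` above also implements the standard `futures::task::Spawn` trait, so components that are generic over a spawner can accept it without knowing about the service. A small illustration of that kind of generic code, run here with `futures::executor::LocalPool` so it stands on its own:

use futures::executor::LocalPool;
use futures::task::{Spawn, SpawnExt};

// Generic over any spawner; in the service this could be a SpawnTaskHandle.
fn start_background_work(spawner: &impl Spawn) {
    spawner
        .spawn(async { println!("background task running"); })
        .expect("spawner is not shut down");
}

fn main() {
    let mut pool = LocalPool::new();
    start_background_work(&pool.spawner());
    pool.run_until_stalled();
}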
+ pub(super) fn spawn(&self, name: impl Into>, task: impl Future + Send + 'static) { + let on_exit = self.on_exit.clone(); + let future = async move { + futures::pin_mut!(task); + let _ = select(on_exit, task).await; + }; + if self.to_spawn_tx.unbounded_send((Box::pin(future), name.into())).is_err() { + error!("Failed to send task to spawn over channel"); + } + } + + pub(super) fn spawn_handle(&self) -> SpawnTaskHandle { + SpawnTaskHandle { + on_exit: self.on_exit.clone(), + sender: self.to_spawn_tx.clone(), + } + } + + /// Get sender where background/async tasks can be sent. + pub(super) fn scheduler(&self) -> TaskScheduler { + self.to_spawn_tx.clone() + } + + /// Process background task receiver. + pub(super) fn process_receiver(&mut self, cx: &mut Context) { + while let Poll::Ready(Some((task_to_spawn, name))) = Pin::new(&mut self.to_spawn_rx).poll_next(cx) { + (self.executor)(Box::pin(futures_diagnose::diagnose(name, task_to_spawn))); + } + } + + /// Clone on exit signal. + pub(super) fn on_exit(&self) -> exit_future::Exit { + self.on_exit.clone() + } +} + +impl Drop for TaskManager { + fn drop(&mut self) { + debug!(target: "service", "Tasks manager shutdown"); + if let Some(signal) = self.signal.take() { + let _ = signal.fire(); + } + } +} diff --git a/client/service/test/Cargo.toml b/client/service/test/Cargo.toml index c9dbe97464aa135a12dc2928eb2262478152315d..f27f8803300b30d86f6ad605541bed438788ffb7 100644 --- a/client/service/test/Cargo.toml +++ b/client/service/test/Cargo.toml @@ -1,9 +1,12 @@ [package] name = "sc-service-test" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] tempfile = "3.1.0" @@ -13,10 +16,10 @@ log = "0.4.8" env_logger = "0.7.0" fdlimit = "0.1.1" futures = { version = "0.3.1", features = ["compat"] } -sc-service = { version = "0.8.0", default-features = false, path = "../../service" } -sc-network = { version = "0.8", path = "../../network" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -sc-client = { version = "0.8", path = "../../" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sc-service = { version = "0.8.0-alpha.2", default-features = false, path = "../../service" } +sc-network = { version = "0.8.0-alpha.2", path = "../../network" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sc-client = { version = "0.8.0-alpha.2", path = "../../" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index b65bccc151823af8bc6db72702de57d97467a032..03cccbe4a053ad8b1a5b51a23035a0ac942355a2 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -141,7 +141,7 @@ fn node_config ( { let root = root.path().join(format!("node-{}", index)); - let config_path = Some(String::from(root.join("network").to_str().unwrap())); + let config_path = Some(root.join("network")); let net_config_path = config_path.clone(); let 
network_config = NetworkConfiguration { @@ -199,7 +199,7 @@ fn node_config ( rpc_ws: None, rpc_ws_max_connections: None, rpc_cors: None, - grafana_port: None, + prometheus_config: None, telemetry_endpoints: None, telemetry_external_transport: None, default_heap_pages: None, diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index 18ad5b113e983f98b94e384b5432357fe2e1d4e6..b5206d3c461a85f3ed41a3e12feeea5b296485fc 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -17,7 +17,7 @@ use std::{sync::Arc, panic::UnwindSafe, result, cell::RefCell}; use codec::{Encode, Decode}; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HasherFor, NumberFor}, + generic::BlockId, traits::{Block as BlockT, HashFor, NumberFor}, }; use sp_state_machine::{ self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, @@ -80,7 +80,6 @@ where let changes_trie = backend::changes_tries_state_at_block( id, self.backend.changes_trie_storage() )?; - // make sure to destroy state before exiting this function let state = self.backend.state_at(*id)?; let return_data = StateMachine::new( &state, @@ -93,12 +92,9 @@ where ).execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( strategy.get_manager(), None, - ); - { - let _lock = self.backend.get_import_lock().read(); - self.backend.destroy_state(state)?; - } - Ok(return_data?.into_encoded()) + )?; + + Ok(return_data.into_encoded()) } fn contextual_call< @@ -138,9 +134,8 @@ where let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - // make sure to destroy state before exiting this function let mut state = self.backend.state_at(*at)?; - let result = match recorder { + match recorder { Some(recorder) => state.as_trie_backend() .ok_or_else(|| Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) @@ -176,18 +171,15 @@ where ) .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) .execute_using_consensus_failure_handler(execution_manager, native_call) - }; - { - let _lock = self.backend.get_import_lock().read(); - self.backend.destroy_state(state)?; - } - result.map_err(Into::into) + }.map_err(Into::into) } fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { let mut overlay = OverlayedChanges::default(); - let changes_trie_state = backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?; - // make sure to destroy state before exiting this function + let changes_trie_state = backend::changes_tries_state_at_block( + id, + self.backend.changes_trie_storage(), + )?; let state = self.backend.state_at(*id)?; let mut cache = StorageTransactionCache::::default(); let mut ext = Ext::new( @@ -197,17 +189,13 @@ where changes_trie_state, None, ); - let version = self.executor.runtime_version(&mut ext); - { - let _lock = self.backend.get_import_lock().read(); - self.backend.destroy_state(state)?; - } - version.map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) + self.executor.runtime_version(&mut ext) + .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) } - fn prove_at_trie_state>>( + fn prove_at_trie_state>>( &self, - trie_state: &sp_state_machine::TrieBackend>, + trie_state: &sp_state_machine::TrieBackend>, overlay: &mut OverlayedChanges, method: &str, call_data: &[u8] diff --git a/client/src/cht.rs b/client/src/cht.rs index 
1435b77ec592e21d1a05be901662653dc6fcfbc1..de67280632302395e2363f6d309b2fecbf75d9ad 100644 --- a/client/src/cht.rs +++ b/client/src/cht.rs @@ -331,8 +331,8 @@ pub fn decode_cht_value(value: &[u8]) -> Option { #[cfg(test)] mod tests { - use sp_core::Blake2Hasher; use substrate_test_runtime_client::runtime::Header; + use sp_runtime::traits::BlakeTwo256; use super::*; #[test] @@ -398,7 +398,7 @@ mod tests { #[test] fn compute_root_works() { - assert!(compute_root::( + assert!(compute_root::( SIZE as _, 42, ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) @@ -409,7 +409,7 @@ mod tests { #[test] #[should_panic] fn build_proof_panics_when_querying_wrong_block() { - assert!(build_proof::( + assert!(build_proof::( SIZE as _, 0, vec![(SIZE * 1000) as u64], @@ -420,7 +420,7 @@ mod tests { #[test] fn build_proof_works() { - assert!(build_proof::( + assert!(build_proof::( SIZE as _, 0, vec![(SIZE / 2) as u64], diff --git a/client/src/client.rs b/client/src/client.rs index e9a8f1228c5e19e15ba21d6702301342b623b2cf..699e3320ff4b7dda7137d6fadec1a4b786d5b9bb 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -34,7 +34,7 @@ use sp_runtime::{ Justification, BuildStorage, generic::{BlockId, SignedBlock, DigestItem}, traits::{ - Block as BlockT, Header as HeaderT, Zero, NumberFor, HasherFor, SaturatedConversion, One, + Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor, SaturatedConversion, One, DigestFor, }, }; @@ -60,13 +60,14 @@ use sp_api::{ CallApiAt, ConstructRuntimeApi, Core as CoreApi, ApiExt, ApiRef, ProvideRuntimeApi, CallApiAtParams, }; -use sc_block_builder::BlockBuilderApi; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; pub use sc_client_api::{ backend::{ self, BlockImportOperation, PrunableStateChangesTrieStorage, ClientImportOperation, Finalizer, ImportSummary, NewBlockState, - changes_tries_state_at_block, + changes_tries_state_at_block, StorageProvider, + LockImportRun, }, client::{ ImportNotifications, FinalityNotification, FinalityNotifications, BlockImportNotification, @@ -75,15 +76,17 @@ pub use sc_client_api::{ }, execution_extensions::{ExecutionExtensions, ExecutionStrategies}, notifications::{StorageNotifications, StorageEventStream}, - CallExecutor, + CallExecutor, ExecutorProvider, ProofProvider, }; use sp_blockchain::Error; +use prometheus_endpoint::Registry; use crate::{ call_executor::LocalCallExecutor, light::{call_executor::prove_execution, fetcher::ChangesProof}, in_mem, genesis, cht, block_rules::{BlockRules, LookupResult as BlockLookupResult}, }; +use crate::client::backend::KeyIterator; /// Substrate Client pub struct Client where Block: BlockT { @@ -99,46 +102,6 @@ pub struct Client where Block: BlockT { _phantom: PhantomData, } -/// An `Iterator` that iterates keys in a given block under a prefix. 
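The test updates in `cht.rs`, and many signatures below, replace `Blake2Hasher`/`HasherFor<Block>` with the runtime-level `BlakeTwo256`/`HashFor<Block>`. A small sketch of why the two spellings coincide for a standard block type; the concrete `Header`/`Block` aliases here are illustrative, not taken from this diff:

use sp_runtime::generic;
use sp_runtime::traits::{BlakeTwo256, HashFor};
use sp_runtime::OpaqueExtrinsic;

// A block whose header hashes with BlakeTwo256, like the test runtime blocks.
type Header = generic::Header<u64, BlakeTwo256>;
type Block = generic::Block<Header, OpaqueExtrinsic>;

// `HashFor<Block>` resolves to the header's hashing type, i.e. BlakeTwo256 here,
// so call sites that used to name the hasher can now stay generic over the block.
fn same_hasher(hash: HashFor<Block>) -> BlakeTwo256 {
    hash
}

fn main() {
    let _ = same_hasher(BlakeTwo256);
}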
-pub struct KeyIterator<'a, State, Block> { - state: State, - prefix: Option<&'a StorageKey>, - current_key: Vec, - _phantom: PhantomData, -} - -impl <'a, State, Block> KeyIterator<'a, State, Block> { - fn new(state: State, prefix: Option<&'a StorageKey>, current_key: Vec) -> Self { - Self { - state, - prefix, - current_key, - _phantom: PhantomData, - } - } -} - -impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where - Block: BlockT, - State: StateBackend>, -{ - type Item = StorageKey; - - fn next(&mut self) -> Option { - let next_key = self.state - .next_storage_key(&self.current_key) - .ok() - .flatten()?; - if let Some(prefix) = self.prefix { - if !next_key.starts_with(&prefix.0[..]) { - return None; - } - } - self.current_key = next_key.clone(); - Some(StorageKey(next_key)) - } -} - // used in importing a block, where additional changes are made after the runtime // executed. enum PrePostHeader { @@ -171,6 +134,7 @@ pub fn new_in_mem( executor: E, genesis_storage: &S, keystore: Option, + prometheus_registry: Option, ) -> sp_blockchain::Result, LocalCallExecutor, E>, @@ -181,7 +145,7 @@ pub fn new_in_mem( S: BuildStorage, Block: BlockT, { - new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage, keystore) + new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage, keystore, prometheus_registry) } /// Create a client with the explicitly provided backend. @@ -191,6 +155,7 @@ pub fn new_with_backend( executor: E, build_genesis_storage: &S, keystore: Option, + prometheus_registry: Option, ) -> sp_blockchain::Result, Block, RA>> where E: CodeExecutor + RuntimeInfo, @@ -207,6 +172,7 @@ pub fn new_with_backend( Default::default(), Default::default(), extensions, + prometheus_registry, ) } @@ -218,6 +184,61 @@ impl BlockOf for Client where type Type = Block; } +impl LockImportRun for Client + where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn lock_import_and_run(&self, f: F) -> Result + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, + { + let inner = || { + let _import_lock = self.backend.get_import_lock().write(); + + let mut op = ClientImportOperation { + op: self.backend.begin_operation()?, + notify_imported: None, + notify_finalized: Vec::new(), + }; + + let r = f(&mut op)?; + + let ClientImportOperation { op, notify_imported, notify_finalized } = op; + self.backend.commit_operation(op)?; + self.notify_finalized(notify_finalized)?; + + if let Some(notify_imported) = notify_imported { + self.notify_imported(notify_imported)?; + } + + Ok(r) + }; + + let result = inner(); + *self.importing_block.write() = None; + + result + } +} + +impl LockImportRun for &Client + where + Block: BlockT, + B: backend::Backend, + E: CallExecutor, +{ + fn lock_import_and_run(&self, f: F) -> Result + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, + { + (**self).lock_import_and_run(f) + } +} + impl Client where B: backend::Backend, E: CallExecutor, @@ -231,6 +252,7 @@ impl Client where fork_blocks: ForkBlocks, bad_blocks: BadBlocks, execution_extensions: ExecutionExtensions, + _prometheus_registry: Option, ) -> sp_blockchain::Result { if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { let genesis_storage = build_genesis_storage.build_storage()?; @@ -264,119 +286,14 @@ impl Client where }) } - /// Get a reference to the execution extensions. 
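`lock_import_and_run`, now provided through the `LockImportRun` trait above, follows a "build an operation under the import lock, commit only if the closure succeeds" shape. A simplified, self-contained sketch of that shape, with a toy `Op` standing in for `ClientImportOperation`:

use std::sync::RwLock;

// Toy stand-ins for the backend operation and the error type.
#[derive(Default)]
struct Op { writes: Vec<(String, String)> }

#[derive(Debug)]
struct Error;

struct Backend { import_lock: RwLock<()>, committed: RwLock<Vec<(String, String)>> }

impl Backend {
    // Mirrors the `Err: From<Error>` bound of the real method, since internal
    // steps (begin, commit, notify) can fail with the backend's own error type.
    fn lock_import_and_run<R, E, F>(&self, f: F) -> Result<R, E>
    where
        F: FnOnce(&mut Op) -> Result<R, E>,
        E: From<Error>,
    {
        // Hold the import lock for the whole operation.
        let _import_lock = self.import_lock.write().unwrap();
        let mut op = Op::default();
        let r = f(&mut op)?;
        // Only a successful closure reaches the commit step.
        self.committed.write().unwrap().extend(op.writes);
        Ok(r)
    }
}

fn main() {
    let backend = Backend {
        import_lock: RwLock::new(()),
        committed: RwLock::new(Vec::new()),
    };
    let res: Result<u32, Error> = backend.lock_import_and_run(|op| {
        op.writes.push(("key".into(), "value".into()));
        Ok(42)
    });
    assert_eq!(res.unwrap(), 42);
    assert_eq!(backend.committed.read().unwrap().len(), 1);
}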
- pub fn execution_extensions(&self) -> &ExecutionExtensions { - &self.execution_extensions - } - /// Get a reference to the state at a given block. pub fn state_at(&self, block: &BlockId) -> sp_blockchain::Result { self.backend.state_at(*block) } - /// Given a `BlockId` and a key prefix, return the matching storage keys in that block. - pub fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result> { - let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); - Ok(keys) - } - - /// Given a `BlockId` and a key prefix, return the matching child storage keys and values in that block. - pub fn storage_pairs(&self, id: &BlockId, key_prefix: &StorageKey) - -> sp_blockchain::Result> - { - let state = self.state_at(id)?; - let keys = state - .keys(&key_prefix.0) - .into_iter() - .map(|k| { - let d = state.storage(&k).ok().flatten().unwrap_or_default(); - (StorageKey(k), StorageData(d)) - }) - .collect(); - Ok(keys) - } - - /// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in that block. - pub fn storage_keys_iter<'a>( - &self, - id: &BlockId, - prefix: Option<&'a StorageKey>, - start_key: Option<&StorageKey> - ) -> sp_blockchain::Result> { - let state = self.state_at(id)?; - let start_key = start_key - .or(prefix) - .map(|key| key.0.clone()) - .unwrap_or_else(Vec::new); - Ok(KeyIterator::new(state, prefix, start_key)) - } - - /// Given a `BlockId` and a key, return the value under the key in that block. - pub fn storage(&self, id: &BlockId, key: &StorageKey) - -> sp_blockchain::Result> - { - Ok(self.state_at(id)? - .storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - .map(StorageData) - ) - } - - /// Given a `BlockId` and a key, return the value under the hash in that block. - pub fn storage_hash(&self, id: &BlockId, key: &StorageKey) - -> sp_blockchain::Result> - { - Ok(self.state_at(id)? - .storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - ) - } - - /// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys. - pub fn child_storage_keys( - &self, - id: &BlockId, - child_storage_key: &StorageKey, - child_info: ChildInfo, - key_prefix: &StorageKey - ) -> sp_blockchain::Result> { - let keys = self.state_at(id)? - .child_keys(&child_storage_key.0, child_info, &key_prefix.0) - .into_iter() - .map(StorageKey) - .collect(); - Ok(keys) - } - - /// Given a `BlockId`, a key and a child storage key, return the value under the key in that block. - pub fn child_storage( - &self, - id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, - key: &StorageKey - ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? - .child_storage(&storage_key.0, child_info, &key.0) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - .map(StorageData)) - } - - /// Given a `BlockId`, a key and a child storage key, return the hash under the key in that block. - pub fn child_storage_hash( - &self, - id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, - key: &StorageKey - ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? - .child_storage_hash(&storage_key.0, child_info, &key.0) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - ) - } - /// Get the code at a given block. pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { - Ok(self.storage(id, &StorageKey(well_known_keys::CODE.to_vec()))? 
+ Ok(StorageProvider::storage(self, id, &StorageKey(well_known_keys::CODE.to_vec()))? .expect("None is returned if there's no value stored for the given key;\ ':code' key is always defined; qed").0) } @@ -386,57 +303,6 @@ impl Client where self.executor.runtime_version(id) } - /// Get call executor reference. - pub fn executor(&self) -> &E { - &self.executor - } - - /// Reads storage value at a given block + key, returning read proof. - pub fn read_proof(&self, id: &BlockId, keys: I) -> sp_blockchain::Result where - I: IntoIterator, - I::Item: AsRef<[u8]>, - { - self.state_at(id) - .and_then(|state| prove_read(state, keys) - .map_err(Into::into)) - } - - /// Reads child storage value at a given block + storage_key + key, returning - /// read proof. - pub fn read_child_proof( - &self, - id: &BlockId, - storage_key: &[u8], - child_info: ChildInfo, - keys: I, - ) -> sp_blockchain::Result where - I: IntoIterator, - I::Item: AsRef<[u8]>, - { - self.state_at(id) - .and_then(|state| prove_child_read(state, storage_key, child_info, keys) - .map_err(Into::into)) - } - - /// Execute a call to a contract on top of state in a block of given hash - /// AND returning execution proof. - /// - /// No changes are made. - pub fn execution_proof(&self, - id: &BlockId, - method: &str, - call_data: &[u8] - ) -> sp_blockchain::Result<(Vec, StorageProof)> { - let state = self.state_at(id)?; - let header = self.prepare_environment_block(id)?; - prove_execution(state, header, &self.executor, method, call_data) - } - - /// Reads given header and generates CHT-based header proof. - pub fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)> { - self.header_proof_with_cht_size(id, cht::size()) - } - /// Get block hash by number. pub fn block_hash(&self, block_number: <::Header as HeaderT>::Number @@ -462,7 +328,7 @@ impl Client where Some(old_current_num) }); let headers = cht_range.map(|num| self.block_hash(num)); - let proof = cht::build_proof::, _, _>( + let proof = cht::build_proof::, _, _>( cht_size, cht_num, std::iter::once(block_num), @@ -471,112 +337,6 @@ impl Client where Ok((header, proof)) } - /// Get longest range within [first; last] that is possible to use in `key_changes` - /// and `key_changes_proof` calls. - /// Range could be shortened from the beginning if some changes tries have been pruned. - /// Returns Ok(None) if changes tries are not supported. - pub fn max_key_changes_range( - &self, - first: NumberFor, - last: BlockId, - ) -> sp_blockchain::Result, BlockId)>> { - let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; - let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - if first > last_number { - return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); - } - - let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { - Some((storage, configs)) => (storage, configs), - None => return Ok(None), - }; - - let first_available_changes_trie = configs.last().map(|config| config.0); - match first_available_changes_trie { - Some(first_available_changes_trie) => { - let oldest_unpruned = storage.oldest_pruned_digest_range_end(); - let first = std::cmp::max(first_available_changes_trie, oldest_unpruned); - Ok(Some((first, last))) - }, - None => Ok(None) - } - } - - /// Get pairs of (block, extrinsic) where key has been changed at given blocks range. - /// Works only for runtimes that are supporting changes tries. 
- /// - /// Changes are returned in descending order (i.e. last block comes first). - pub fn key_changes( - &self, - first: NumberFor, - last: BlockId, - storage_key: Option<&StorageKey>, - key: &StorageKey - ) -> sp_blockchain::Result, u32)>> { - let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; - let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - let (storage, configs) = self.require_changes_trie(first, last_hash, true)?; - - let mut result = Vec::new(); - let best_number = self.backend.blockchain().info().best_number; - for (config_zero, config_end, config) in configs { - let range_first = ::std::cmp::max(first, config_zero + One::one()); - let range_anchor = match config_end { - Some((config_end_number, config_end_hash)) => if last_number > config_end_number { - ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number } - } else { - ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number } - }, - None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, - }; - - let config_range = ChangesTrieConfigurationRange { - config: &config, - zero: config_zero.clone(), - end: config_end.map(|(config_end_number, _)| config_end_number), - }; - let result_range: Vec<(NumberFor, u32)> = key_changes::, _>( - config_range, - storage.storage(), - range_first, - &range_anchor, - best_number, - storage_key.as_ref().map(|x| &x.0[..]), - &key.0) - .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) - .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; - result.extend(result_range); - } - - Ok(result) - } - - /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. - /// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using - /// changes tries from ascendants of this block, we should provide proofs for changes tries roots - /// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants - /// of this block. - /// Works only for runtimes that are supporting changes tries. - pub fn key_changes_proof( - &self, - first: Block::Hash, - last: Block::Hash, - min: Block::Hash, - max: Block::Hash, - storage_key: Option<&StorageKey>, - key: &StorageKey, - ) -> sp_blockchain::Result> { - self.key_changes_proof_with_cht_size( - first, - last, - min, - max, - storage_key, - key, - cht::size(), - ) - } - /// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size. 
pub fn key_changes_proof_with_cht_size( &self, @@ -589,12 +349,12 @@ impl Client where cht_size: NumberFor, ) -> sp_blockchain::Result> { struct AccessedRootsRecorder<'a, Block: BlockT> { - storage: &'a dyn ChangesTrieStorage, NumberFor>, + storage: &'a dyn ChangesTrieStorage, NumberFor>, min: NumberFor, required_roots_proofs: Mutex, Block::Hash>>, }; - impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> for + impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> for AccessedRootsRecorder<'a, Block> { fn build_anchor(&self, hash: Block::Hash) @@ -621,11 +381,11 @@ impl Client where } } - impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> for + impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> for AccessedRootsRecorder<'a, Block> { fn as_roots_storage(&self) - -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> + -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { self } @@ -669,7 +429,7 @@ impl Client where zero: config_zero, end: config_end.map(|(config_end_number, _)| config_end_number), }; - let proof_range = key_changes_proof::, _>( + let proof_range = key_changes_proof::, _>( config_range, &recording_storage, first_number, @@ -736,7 +496,7 @@ impl Client where .map(|block| block.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned())) ); - let proof = cht::build_proof::, _, _>( + let proof = cht::build_proof::, _, _>( cht_size, cht_num, blocks, @@ -808,66 +568,6 @@ impl Client where ) } - /// Create a new block, built on top of `parent`. - /// - /// When proof recording is enabled, all accessed trie nodes are saved. - /// These recorded trie nodes can be used by a third party to proof the - /// output of this block builder without having access to the full storage. - pub fn new_block_at>( - &self, - parent: &BlockId, - inherent_digests: DigestFor, - record_proof: R, - ) -> sp_blockchain::Result> where - E: Clone + Send + Sync, - RA: Send + Sync, - Self: ProvideRuntimeApi, - >::Api: BlockBuilderApi + - ApiExt> - { - sc_block_builder::BlockBuilder::new( - self, - self.expect_block_hash_from_id(parent)?, - self.expect_block_number_from_id(parent)?, - record_proof.into(), - inherent_digests, - &self.backend - ) - } - - /// Lock the import lock, and run operations inside. - pub fn lock_import_and_run(&self, f: F) -> Result where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From, - { - let inner = || { - let _import_lock = self.backend.get_import_lock().write(); - - let mut op = ClientImportOperation { - op: self.backend.begin_operation()?, - notify_imported: None, - notify_finalized: Vec::new(), - }; - - let r = f(&mut op)?; - - let ClientImportOperation { op, notify_imported, notify_finalized } = op; - self.backend.commit_operation(op)?; - self.notify_finalized(notify_finalized)?; - - if let Some(notify_imported) = notify_imported { - self.notify_imported(notify_imported)?; - } - - Ok(r) - }; - - let result = inner(); - *self.importing_block.write() = None; - - result - } - /// Apply a checked and validated block to an operation. If a justification is provided /// then `finalized` *must* be true. fn apply_block( @@ -1130,15 +830,7 @@ impl Client where &state, changes_trie_state.as_ref(), *parent_hash, - ); - - { - let _lock = self.backend.get_import_lock().read(); - self.backend.destroy_state(state)?; - } - - // Make sure to consume the error, only after we have destroyed the state. 
- let gen_storage_changes = gen_storage_changes?; + )?; if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root @@ -1340,19 +1032,8 @@ impl Client where } /// Get block justification set by id. - pub fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { - self.backend.blockchain().justification(*id) - } - - /// Get full block by id. - pub fn block(&self, id: &BlockId) - -> sp_blockchain::Result>> - { - Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) { - (Some(header), Some(extrinsics), justification) => - Some(SignedBlock { block: Block::new(header, extrinsics), justification }), - _ => None, - }) + pub fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { + self.backend.blockchain().justification(*id) } /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. @@ -1399,6 +1080,286 @@ impl Client where } } +impl ProofProvider for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn read_proof( + &self, + id: &BlockId, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result { + self.state_at(id) + .and_then(|state| prove_read(state, keys) + .map_err(Into::into)) + } + + fn read_child_proof( + &self, + id: &BlockId, + storage_key: &[u8], + child_info: ChildInfo, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result { + self.state_at(id) + .and_then(|state| prove_child_read(state, storage_key, child_info, keys) + .map_err(Into::into)) + } + + fn execution_proof( + &self, + id: &BlockId, + method: &str, + call_data: &[u8] + ) -> sp_blockchain::Result<(Vec, StorageProof)> { + let state = self.state_at(id)?; + let header = self.prepare_environment_block(id)?; + prove_execution(state, header, &self.executor, method, call_data) + } + + fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)> { + self.header_proof_with_cht_size(id, cht::size()) + } + + fn key_changes_proof( + &self, + first: Block::Hash, + last: Block::Hash, + min: Block::Hash, + max: Block::Hash, + storage_key: Option<&StorageKey>, + key: &StorageKey, + ) -> sp_blockchain::Result> { + self.key_changes_proof_with_cht_size( + first, + last, + min, + max, + storage_key, + key, + cht::size(), + ) + } +} + + +impl BlockBuilderProvider for Client + where + B: backend::Backend + Send + Sync + 'static, + E: CallExecutor + Send + Sync + 'static, + Block: BlockT, + Self: ChainHeaderBackend + ProvideRuntimeApi, + >::Api: ApiExt> + + BlockBuilderApi, +{ + fn new_block_at>( + &self, + parent: &BlockId, + inherent_digests: DigestFor, + record_proof: R, + ) -> sp_blockchain::Result> { + sc_block_builder::BlockBuilder::new( + self, + self.expect_block_hash_from_id(parent)?, + self.expect_block_number_from_id(parent)?, + record_proof.into(), + inherent_digests, + &self.backend + ) + } +} + +impl ExecutorProvider for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + type Executor = E; + + fn executor(&self) -> &Self::Executor { + &self.executor + } + + fn execution_extensions(&self) -> &ExecutionExtensions { + &self.execution_extensions + } +} + +impl StorageProvider for Client where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, +{ + fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result> { + let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); + Ok(keys) + } + + fn storage_pairs(&self, id: &BlockId, key_prefix: &StorageKey) + -> sp_blockchain::Result> + { + let state = 
self.state_at(id)?; + let keys = state + .keys(&key_prefix.0) + .into_iter() + .map(|k| { + let d = state.storage(&k).ok().flatten().unwrap_or_default(); + (StorageKey(k), StorageData(d)) + }) + .collect(); + Ok(keys) + } + + + fn storage_keys_iter<'a>( + &self, + id: &BlockId, + prefix: Option<&'a StorageKey>, + start_key: Option<&StorageKey> + ) -> sp_blockchain::Result> { + let state = self.state_at(id)?; + let start_key = start_key + .or(prefix) + .map(|key| key.0.clone()) + .unwrap_or_else(Vec::new); + Ok(KeyIterator::new(state, prefix, start_key)) + } + + + fn storage(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result> + { + Ok(self.state_at(id)? + .storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .map(StorageData) + ) + } + + + fn storage_hash(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result> + { + Ok(self.state_at(id)? + .storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + ) + } + + + fn child_storage_keys( + &self, + id: &BlockId, + child_storage_key: &StorageKey, + child_info: ChildInfo, + key_prefix: &StorageKey + ) -> sp_blockchain::Result> { + let keys = self.state_at(id)? + .child_keys(&child_storage_key.0, child_info, &key_prefix.0) + .into_iter() + .map(StorageKey) + .collect(); + Ok(keys) + } + + + fn child_storage( + &self, + id: &BlockId, + storage_key: &StorageKey, + child_info: ChildInfo, + key: &StorageKey + ) -> sp_blockchain::Result> { + Ok(self.state_at(id)? + .child_storage(&storage_key.0, child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .map(StorageData)) + } + + + fn child_storage_hash( + &self, + id: &BlockId, + storage_key: &StorageKey, + child_info: ChildInfo, + key: &StorageKey + ) -> sp_blockchain::Result> { + Ok(self.state_at(id)? + .child_storage_hash(&storage_key.0, child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
+ ) + } + + fn max_key_changes_range( + &self, + first: NumberFor, + last: BlockId, + ) -> sp_blockchain::Result, BlockId)>> { + let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; + let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + if first > last_number { + return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); + } + + let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { + Some((storage, configs)) => (storage, configs), + None => return Ok(None), + }; + + let first_available_changes_trie = configs.last().map(|config| config.0); + match first_available_changes_trie { + Some(first_available_changes_trie) => { + let oldest_unpruned = storage.oldest_pruned_digest_range_end(); + let first = std::cmp::max(first_available_changes_trie, oldest_unpruned); + Ok(Some((first, last))) + }, + None => Ok(None) + } + } + + fn key_changes( + &self, + first: NumberFor, + last: BlockId, + storage_key: Option<&StorageKey>, + key: &StorageKey + ) -> sp_blockchain::Result, u32)>> { + let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; + let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + let (storage, configs) = self.require_changes_trie(first, last_hash, true)?; + + let mut result = Vec::new(); + let best_number = self.backend.blockchain().info().best_number; + for (config_zero, config_end, config) in configs { + let range_first = ::std::cmp::max(first, config_zero + One::one()); + let range_anchor = match config_end { + Some((config_end_number, config_end_hash)) => if last_number > config_end_number { + ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number } + } else { + ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number } + }, + None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, + }; + + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: config_zero.clone(), + end: config_end.map(|(config_end_number, _)| config_end_number), + }; + let result_range: Vec<(NumberFor, u32)> = key_changes::, _>( + config_range, + storage.storage(), + range_first, + &range_anchor, + best_number, + storage_key.as_ref().map(|x| &x.0[..]), + &key.0) + .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) + .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; + result.extend(result_range); + } + + Ok(result) + } +} + impl HeaderMetadata for Client where B: backend::Backend, E: CallExecutor, @@ -1823,7 +1784,9 @@ where fn best_block_header(&self) -> sp_blockchain::Result<::Header> { let info = self.backend.blockchain().info(); let import_lock = self.backend.get_import_lock(); - let best_hash = self.backend.blockchain().best_containing(info.best_hash, None, import_lock)? + let best_hash = self.backend + .blockchain() + .best_containing(info.best_hash, None, import_lock)? .unwrap_or(info.best_hash); Ok(self.backend.blockchain().header(BlockId::Hash(best_hash))? @@ -1876,6 +1839,15 @@ impl BlockBody for Client ) -> sp_blockchain::Result::Extrinsic>>> { self.body(id) } + + fn block(&self, id: &BlockId) -> sp_blockchain::Result>> + { + Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) 
{ + (Some(header), Some(extrinsics), justification) => + Some(SignedBlock { block: Block::new(header, extrinsics), justification }), + _ => None, + }) + } } impl backend::AuxStore for Client diff --git a/client/src/genesis.rs b/client/src/genesis.rs index fccdd71817e1fe0fae978bef8a52d166d4aeb1f1..0eecc6cdae8908188b48291edeac9ad3da4c0422 100644 --- a/client/src/genesis.rs +++ b/client/src/genesis.rs @@ -53,7 +53,7 @@ mod tests { runtime::{Hash, Transfer, Block, BlockNumber, Header, Digest}, AccountKeyring, Sr25519Keyring, }; - use sp_core::Blake2Hasher; + use sp_runtime::traits::BlakeTwo256; use hex_literal::*; native_executor_instance!( @@ -67,7 +67,7 @@ mod tests { } fn construct_block( - backend: &InMemoryBackend, + backend: &InMemoryBackend, number: BlockNumber, parent_hash: Hash, state_root: Hash, @@ -78,7 +78,7 @@ mod tests { let transactions = txs.into_iter().map(|tx| tx.into_signed_tx()).collect::>(); let iter = transactions.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter).into(); + let extrinsics_root = Layout::::ordered_trie_root(iter).into(); let mut header = Header { parent_hash, @@ -132,7 +132,7 @@ mod tests { (vec![].and(&Block { header, extrinsics: transactions }), hash) } - fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { + fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { construct_block( backend, 1, diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index dcff8102aeb6d497d72f367c5c33d0eb3a726685..bdbfdbc7ec8f03cba799075dd84274860b018ab3 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -24,7 +24,7 @@ use sp_core::offchain::storage::{ InMemOffchainStorage as OffchainStorage }; use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HasherFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor}; use sp_runtime::{Justification, Storage}; use sp_state_machine::{ ChangesTrieTransaction, InMemoryBackend, Backend as StateBackend, StorageCollection, @@ -462,8 +462,8 @@ impl sc_client_api::light::Storage for Blockchain pub struct BlockImportOperation { pending_block: Option>, pending_cache: HashMap>, - old_state: InMemoryBackend>, - new_state: Option>>, + old_state: InMemoryBackend>, + new_state: Option>>, aux: Vec<(Vec, Option>)>, finalized_blocks: Vec<(BlockId, Option)>, set_head: Option>, @@ -472,7 +472,7 @@ pub struct BlockImportOperation { impl backend::BlockImportOperation for BlockImportOperation where Block::Hash: Ord, { - type State = InMemoryBackend>; + type State = InMemoryBackend>; fn state(&self) -> sp_blockchain::Result> { Ok(Some(&self.old_state)) @@ -499,7 +499,7 @@ impl backend::BlockImportOperation for BlockImportOperatio fn update_db_storage( &mut self, - update: > as StateBackend>>::Transaction, + update: > as StateBackend>>::Transaction, ) -> sp_blockchain::Result<()> { self.new_state = Some(self.old_state.update(update)); Ok(()) @@ -507,7 +507,7 @@ impl backend::BlockImportOperation for BlockImportOperatio fn update_changes_trie( &mut self, - _update: ChangesTrieTransaction, NumberFor>, + _update: ChangesTrieTransaction, NumberFor>, ) -> sp_blockchain::Result<()> { Ok(()) } @@ -564,7 +564,7 @@ impl backend::BlockImportOperation for BlockImportOperatio /// > **Warning**: Doesn't support all the features necessary for a proper database. Only use this /// > struct for testing purposes. Do **NOT** use in production. 
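The genesis and fetcher tests in this diff compute extrinsics roots with `Layout::<BlakeTwo256>::ordered_trie_root`. A minimal standalone sketch of that call, assuming the `sp-trie` and `sp-runtime` crates from this workspace; the sample extrinsic bytes are made up:

use sp_runtime::traits::BlakeTwo256;
use sp_trie::{trie_types::Layout, TrieConfiguration};

fn main() {
    // Encoded extrinsics are folded into an ordered trie root, as in the genesis
    // and fetcher tests touched by this diff.
    let extrinsics: Vec<Vec<u8>> = vec![b"transfer".to_vec(), b"set_code".to_vec()];
    let root = Layout::<BlakeTwo256>::ordered_trie_root(extrinsics.iter());
    println!("extrinsics root: {:?}", root);
}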
pub struct Backend where Block::Hash: Ord { - states: RwLock>>>, + states: RwLock>>>, blockchain: Blockchain, import_lock: RwLock<()>, } @@ -599,7 +599,7 @@ impl backend::AuxStore for Backend where Block::Hash: Ord impl backend::Backend for Backend where Block::Hash: Ord { type BlockImportOperation = BlockImportOperation; type Blockchain = Blockchain; - type State = InMemoryBackend>; + type State = InMemoryBackend>; type OffchainStorage = OffchainStorage; fn begin_operation(&self) -> sp_blockchain::Result { diff --git a/client/src/lib.rs b/client/src/lib.rs index d97246d478c2b6df0eb13128c4ceba23d9ccd6b0..9c6393314f06a0a57fe2c75c981ba1f938ad383d 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -47,7 +47,6 @@ //! ``` //! use std::sync::Arc; //! use sc_client::{Client, in_mem::Backend, LocalCallExecutor}; -//! use sp_core::Blake2Hasher; //! use sp_runtime::Storage; //! use sc_executor::{NativeExecutor, WasmExecutionMethod}; //! @@ -69,6 +68,7 @@ //! Default::default(), //! Default::default(), //! Default::default(), +//! None, //! ); //! ``` //! @@ -98,7 +98,7 @@ pub use crate::{ client::{ new_with_backend, new_in_mem, - BlockBody, ImportNotifications, FinalityNotifications, BlockchainEvents, + BlockBody, ImportNotifications, FinalityNotifications, BlockchainEvents, LockImportRun, BlockImportNotification, Client, ClientInfo, ExecutionStrategies, FinalityNotification, LongestChain, BlockOf, ProvideUncles, BadBlocks, ForkBlocks, apply_aux, }, diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index ad9f43587e4cdba28e50a79b3237a703100bdbde..6b5f9263009a2d370d814a545e2a3cde700f0123 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -31,7 +31,7 @@ use sp_state_machine::{ StorageCollection, ChildStorageCollection, }; use sp_runtime::{generic::BlockId, Justification, Storage}; -use sp_runtime::traits::{Block as BlockT, NumberFor, Zero, Header, HasherFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero, Header, HashFor}; use crate::in_mem::check_genesis_storage; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{ @@ -65,7 +65,7 @@ pub struct ImportOperation { aux_ops: Vec<(Vec, Option>)>, finalized_blocks: Vec>, set_head: Option>, - storage_update: Option>>, + storage_update: Option>>, changes_trie_config_update: Option>, _phantom: std::marker::PhantomData, } @@ -111,7 +111,7 @@ impl AuxStore for Backend { } } -impl ClientBackend for Backend> +impl ClientBackend for Backend> where Block: BlockT, S: BlockchainStorage, @@ -119,7 +119,7 @@ impl ClientBackend for Backend> { type BlockImportOperation = ImportOperation; type Blockchain = Blockchain; - type State = GenesisOrUnavailableState>; + type State = GenesisOrUnavailableState>; type OffchainStorage = InMemOffchainStorage; fn begin_operation(&self) -> ClientResult { @@ -238,7 +238,7 @@ impl ClientBackend for Backend> } } -impl RemoteBackend for Backend> +impl RemoteBackend for Backend> where Block: BlockT, S: BlockchainStorage + 'static, @@ -262,7 +262,7 @@ impl BlockImportOperation for ImportOperation S: BlockchainStorage, Block::Hash: Ord, { - type State = GenesisOrUnavailableState>; + type State = GenesisOrUnavailableState>; fn state(&self) -> ClientResult> { // None means 'locally-stateless' backend @@ -287,7 +287,7 @@ impl BlockImportOperation for ImportOperation fn update_db_storage( &mut self, - _update: >>::Transaction, + _update: >>::Transaction, ) -> ClientResult<()> { // we're not storing anything locally => ignore changes Ok(()) @@ 
-295,7 +295,7 @@ impl BlockImportOperation for ImportOperation fn update_changes_trie( &mut self, - _update: ChangesTrieTransaction, NumberFor>, + _update: ChangesTrieTransaction, NumberFor>, ) -> ClientResult<()> { // we're not storing anything locally => ignore changes Ok(()) @@ -515,10 +515,10 @@ impl StateBackend for GenesisOrUnavailableState #[cfg(test)] mod tests { - use sp_core::Blake2Hasher; use substrate_test_runtime_client::{self, runtime::Block}; use sc_client_api::backend::NewBlockState; use crate::light::blockchain::tests::{DummyBlockchain, DummyStorage}; + use sp_runtime::traits::BlakeTwo256; use super::*; #[test] @@ -526,7 +526,9 @@ mod tests { let def = Default::default(); let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); - let backend: Backend<_, Blake2Hasher> = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let backend: Backend<_, BlakeTwo256> = Backend::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + ); let mut op = backend.begin_operation().unwrap(); op.set_block_data(header0, None, None, NewBlockState::Final).unwrap(); op.reset_storage(Default::default()).unwrap(); @@ -540,7 +542,9 @@ mod tests { #[test] fn unavailable_state_is_created_when_genesis_state_is_unavailable() { - let backend: Backend<_, Blake2Hasher> = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let backend: Backend<_, BlakeTwo256> = Backend::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + ); match backend.state_at(BlockId::Number(0)).unwrap() { GenesisOrUnavailableState::Unavailable => (), diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index 01a93c78219bc836a6c9dc89506b66aa55889cf1..cae5d5a0aa8e290b3a372b20eb8212076965dc3c 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -23,7 +23,7 @@ use std::{ use codec::{Encode, Decode}; use sp_core::{convert_hash, NativeOrEncoded, traits::CodeExecutor}; use sp_runtime::{ - generic::BlockId, traits::{One, Block as BlockT, Header as HeaderT, HasherFor}, + generic::BlockId, traits::{One, Block as BlockT, Header as HeaderT, HashFor}, }; use sp_externalities::Extensions; use sp_state_machine::{ @@ -40,7 +40,7 @@ use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{ backend::RemoteBackend, light::RemoteCallRequest, - call_executor::CallExecutor + call_executor::CallExecutor, }; use sc_executor::{RuntimeVersion, NativeVersion}; @@ -153,9 +153,9 @@ impl CallExecutor for } } - fn prove_at_trie_state>>( + fn prove_at_trie_state>>( &self, - _state: &sp_state_machine::TrieBackend>, + _state: &sp_state_machine::TrieBackend>, _changes: &mut OverlayedChanges, _method: &str, _call_data: &[u8], @@ -181,7 +181,7 @@ pub fn prove_execution( ) -> ClientResult<(Vec, StorageProof)> where Block: BlockT, - S: StateBackend>, + S: StateBackend>, E: CallExecutor, { let trie_state = state.as_trie_backend() @@ -285,9 +285,11 @@ mod tests { runtime::{Header, Digest, Block}, TestClient, ClientBlockImportExt, }; use sc_executor::{NativeExecutor, WasmExecutionMethod}; - use sp_core::{Blake2Hasher, H256}; + use sp_core::H256; use sc_client_api::backend::{Backend, NewBlockState}; use crate::in_mem::Backend as InMemBackend; + use sc_client_api::ProofProvider; + use sp_runtime::traits::BlakeTwo256; struct DummyCallExecutor; @@ -342,9 +344,9 @@ mod tests { unreachable!() } - fn prove_at_trie_state>>( + fn prove_at_trie_state>>( &self, - _trie_state: 
&sp_state_machine::TrieBackend>, + _trie_state: &sp_state_machine::TrieBackend>, _overlay: &mut OverlayedChanges, _method: &str, _call_data: &[u8] @@ -375,7 +377,7 @@ mod tests { ).unwrap(); // check remote execution proof locally - let local_result = check_execution_proof::<_, _, Blake2Hasher>( + let local_result = check_execution_proof::<_, _, BlakeTwo256>( &local_executor(), &RemoteCallRequest { block: substrate_test_runtime_client::runtime::Hash::default(), @@ -402,7 +404,7 @@ mod tests { ).unwrap(); // check remote execution proof locally - let execution_result = check_execution_proof_with_make_header::<_, _, Blake2Hasher, _>( + let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( &local_executor(), &RemoteCallRequest { block: substrate_test_runtime_client::runtime::Hash::default(), diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 9df6a38630684f8f545f2a4f1175fa3b21d386f6..87e21e3c0e10e6709283296febe391ad670705a6 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -341,20 +341,21 @@ pub mod tests { }; use sp_consensus::BlockOrigin; - use crate::in_mem::{Blockchain as InMemoryBlockchain}; + use crate::in_mem::Blockchain as InMemoryBlockchain; use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; - use sp_core::{blake2_256, Blake2Hasher, ChangesTrieConfiguration, H256}; + use sp_core::{blake2_256, ChangesTrieConfiguration, H256}; use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; - use sp_runtime::generic::BlockId; + use sp_runtime::{generic::BlockId, traits::BlakeTwo256}; use sp_state_machine::Backend; use super::*; + use sc_client_api::{StorageProvider, ProofProvider}; const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); type TestChecker = LightDataChecker< NativeExecutor, - Blake2Hasher, + BlakeTwo256, Block, DummyStorage, >; @@ -378,7 +379,7 @@ pub mod tests { .and_then(|v| Decode::decode(&mut &v.0[..]).ok()).unwrap(); let remote_read_proof = remote_client.read_proof( &remote_block_id, - &[well_known_keys::HEAP_PAGES], + &mut std::iter::once(well_known_keys::HEAP_PAGES), ).unwrap(); // check remote read proof locally @@ -426,7 +427,7 @@ pub mod tests { &remote_block_id, b":child_storage:default:child1", CHILD_INFO_1, - &[b"key1"], + &mut std::iter::once("key1".as_bytes()), ).unwrap(); // check locally @@ -464,7 +465,7 @@ pub mod tests { // check remote read proof locally let local_storage = InMemoryBlockchain::::new(); - let local_cht_root = cht::compute_root::(4, 0, local_headers_hashes).unwrap(); + let local_cht_root = cht::compute_root::(4, 0, local_headers_hashes).unwrap(); if insert_cht { local_storage.insert_cht_root(1, local_cht_root); } @@ -478,7 +479,7 @@ pub mod tests { fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { use sp_trie::{TrieConfiguration, trie_types::Layout}; let iter = extrinsics.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter); + let extrinsics_root = Layout::::ordered_trie_root(iter); // only care about `extrinsics_root` Header::new(0, extrinsics_root, H256::zero(), H256::zero(), Default::default()) @@ -624,7 +625,7 @@ pub mod tests { ).unwrap(); // prepare local checker, having a root of changes trie CHT#0 - let local_cht_root = cht::compute_root::(4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); + let local_cht_root = cht::compute_root::(4, 0, 
remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); let mut local_storage = DummyStorage::new(); local_storage.changes_tries_cht_roots.insert(0, local_cht_root); let local_checker = TestChecker::new( @@ -731,7 +732,7 @@ pub mod tests { // we're testing this test case here: // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); - let local_cht_root = cht::compute_root::( + let local_cht_root = cht::compute_root::( 4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); let dave = StorageKey(dave); diff --git a/client/src/light/mod.rs b/client/src/light/mod.rs index d65fdef71193db17135fc324f3bb3bdeead46409..07816b3b3569252e6ab15023db74eabde75d7577 100644 --- a/client/src/light/mod.rs +++ b/client/src/light/mod.rs @@ -26,8 +26,9 @@ use std::sync::Arc; use sc_executor::RuntimeInfo; use sp_core::traits::CodeExecutor; use sp_runtime::BuildStorage; -use sp_runtime::traits::{Block as BlockT, HasherFor}; +use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_blockchain::Result as ClientResult; +use prometheus_endpoint::Registry; use crate::call_executor::LocalCallExecutor; use crate::client::Client; @@ -45,7 +46,7 @@ pub fn new_light_blockchain>(storage: S) -> A } /// Create an instance of light client backend. -pub fn new_light_backend(blockchain: Arc>) -> Arc>> +pub fn new_light_backend(blockchain: Arc>) -> Arc>> where B: BlockT, S: BlockchainStorage, @@ -55,15 +56,16 @@ pub fn new_light_backend(blockchain: Arc>) -> Arc( - backend: Arc>>, + backend: Arc>>, genesis_storage: &GS, code_executor: E, + prometheus_registry: Option, ) -> ClientResult< Client< - Backend>, + Backend>, GenesisCallExecutor< - Backend>, - LocalCallExecutor>, E> + Backend>, + LocalCallExecutor>, E> >, B, RA @@ -84,6 +86,7 @@ pub fn new_light( Default::default(), Default::default(), Default::default(), + prometheus_registry, ) } @@ -91,7 +94,7 @@ pub fn new_light( pub fn new_fetch_checker>( blockchain: Arc>, executor: E, -) -> LightDataChecker, B, S> +) -> LightDataChecker, B, S> where E: CodeExecutor, { diff --git a/client/state-db/Cargo.toml b/client/state-db/Cargo.toml index 1bfa13693780a5e549d8d1fa82c5436ab54e4c98..7c7823b534ea27f2542d23ebeecf3a8642172303 100644 --- a/client/state-db/Cargo.toml +++ b/client/state-db/Cargo.toml @@ -1,15 +1,21 @@ [package] name = "sc-state-db" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "State database maintenance. Handles canonicalization and pruning in the database." 
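The new `parity-util-mem` dependencies listed below back the `memory_info` accounting added to `state-db` further down, which sizes the non-canonical overlay, the pruning window and the pinned-blocks map with `malloc_size`. A small standalone sketch of that sizing API; the cache shapes here are made up:

use parity_util_mem::{malloc_size, MallocSizeOf};

// Report the heap footprint of any value that implements MallocSizeOf,
// in the spirit of StateDbSync::memory_info below.
fn report<T: MallocSizeOf>(name: &str, value: &T) {
    println!("{}: {} bytes", name, malloc_size(value));
}

fn main() {
    let pinned: std::collections::HashMap<u64, u32> = Default::default();
    let overlay: Vec<Vec<u8>> = vec![vec![0u8; 1024]; 4];
    report("pinned", &pinned);
    report("non_canonical", &overlay);
}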
[dependencies] parking_lot = "0.10.0" log = "0.4.8" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } +parity-util-mem = "0.5.1" +parity-util-mem-derive = "0.1.0" [dev-dependencies] env_logger = "0.7.0" diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 0eab640de84e898e41c229890effbe6975d214e0..49b1a59285e11eb332bb099b7839f8b7383f5439 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -31,7 +31,8 @@ mod noncanonical; mod pruning; -#[cfg(test)] mod test; +#[cfg(test)] +mod test; use std::fmt; use parking_lot::RwLock; @@ -40,6 +41,8 @@ use std::collections::{HashMap, hash_map::Entry}; use noncanonical::NonCanonicalOverlay; use pruning::RefWindow; use log::trace; +use parity_util_mem::{MallocSizeOf, malloc_size}; +use sc_client_api::{StateDbMemoryInfo, MemorySize}; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -120,7 +123,6 @@ pub struct ChangeSet { pub deleted: Vec, } - /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] pub struct CommitSet { @@ -196,8 +198,11 @@ struct StateDbSync { pinned: HashMap, } -impl StateDbSync { - pub fn new(mode: PruningMode, db: &D) -> Result, Error> { +impl StateDbSync { + fn new( + mode: PruningMode, + db: &D, + ) -> Result, Error> { trace!(target: "state-db", "StateDb settings: {:?}", mode); // Check that settings match @@ -234,7 +239,13 @@ impl StateDbSync { } } - pub fn insert_block(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, mut changeset: ChangeSet) -> Result, Error> { + fn insert_block( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + mut changeset: ChangeSet, + ) -> Result, Error> { let mut meta = ChangeSet::default(); if number == 0 { // Save pruning mode when writing first block. @@ -247,7 +258,7 @@ impl StateDbSync { // write changes immediately Ok(CommitSet { data: changeset, - meta: meta, + meta, }) }, PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { @@ -260,7 +271,10 @@ impl StateDbSync { } } - pub fn canonicalize_block(&mut self, hash: &BlockHash) -> Result, Error> { + fn canonicalize_block( + &mut self, + hash: &BlockHash, + ) -> Result, Error> { let mut commit = CommitSet::default(); if self.mode == PruningMode::ArchiveAll { return Ok(commit) @@ -280,18 +294,23 @@ impl StateDbSync { Ok(commit) } - pub fn best_canonical(&self) -> Option { + fn best_canonical(&self) -> Option { return self.non_canonical.last_canonicalized_block_number() } - pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { + fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { match self.mode { PruningMode::ArchiveAll => false, PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { if self.best_canonical().map(|c| number > c).unwrap_or(true) { !self.non_canonical.have_block(hash) } else { - self.pruning.as_ref().map_or(false, |pruning| number < pruning.pending() || !pruning.have_block(hash)) + self.pruning + .as_ref() + .map_or( + false, + |pruning| number < pruning.pending() || !pruning.have_block(hash), + ) } } } @@ -320,7 +339,7 @@ impl StateDbSync { /// Revert all non-canonical blocks with the best block number. 
/// Returns a database commit or `None` if not possible. /// For archive an empty commit set is returned. - pub fn revert_one(&mut self) -> Option> { + fn revert_one(&mut self) -> Option> { match self.mode { PruningMode::ArchiveAll => { Some(CommitSet::default()) @@ -331,7 +350,7 @@ impl StateDbSync { } } - pub fn pin(&mut self, hash: &BlockHash) -> Result<(), PinError> { + fn pin(&mut self, hash: &BlockHash) -> Result<(), PinError> { match self.mode { PruningMode::ArchiveAll => Ok(()), PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { @@ -340,7 +359,7 @@ impl StateDbSync { { let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { - trace!(target: "state-db", "Pinned block: {:?}", hash); + trace!(target: "state-db-pin", "Pinned block: {:?}", hash); self.non_canonical.pin(hash); } *refs += 1; @@ -352,16 +371,16 @@ impl StateDbSync { } } - pub fn unpin(&mut self, hash: &BlockHash) { + fn unpin(&mut self, hash: &BlockHash) { match self.pinned.entry(hash.clone()) { Entry::Occupied(mut entry) => { *entry.get_mut() -= 1; if *entry.get() == 0 { - trace!(target: "state-db", "Unpinned block: {:?}", hash); + trace!(target: "state-db-pin", "Unpinned block: {:?}", hash); entry.remove(); self.non_canonical.unpin(hash); } else { - trace!(target: "state-db", "Releasing reference for {:?}", hash); + trace!(target: "state-db-pin", "Releasing reference for {:?}", hash); } }, Entry::Vacant(_) => {}, @@ -377,12 +396,14 @@ impl StateDbSync { db.get(key.as_ref()).map_err(|e| Error::Db(e)) } - pub fn apply_pending(&mut self) { + fn apply_pending(&mut self) { self.non_canonical.apply_pending(); if let Some(pruning) = &mut self.pruning { pruning.apply_pending(); } - trace!(target: "forks", "First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}", + trace!( + target: "forks", + "First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}", self.pruning.as_ref().and_then(|p| p.next_hash()), self.pruning.as_ref().map(|p| p.pending()).unwrap_or(0), self.non_canonical.last_canonicalized_hash(), @@ -391,12 +412,20 @@ impl StateDbSync { ); } - pub fn revert_pending(&mut self) { + fn revert_pending(&mut self) { if let Some(pruning) = &mut self.pruning { pruning.revert_pending(); } self.non_canonical.revert_pending(); } + + fn memory_info(&self) -> StateDbMemoryInfo { + StateDbMemoryInfo { + non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)), + pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(p))), + pinned: MemorySize::from_bytes(malloc_size(&self.pinned)), + } + } } /// State DB maintenance. See module description. @@ -405,21 +434,33 @@ pub struct StateDb { db: RwLock>, } -impl StateDb { +impl StateDb { /// Creates a new instance. Does not expect any metadata in the database. - pub fn new(mode: PruningMode, db: &D) -> Result, Error> { + pub fn new( + mode: PruningMode, + db: &D, + ) -> Result, Error> { Ok(StateDb { db: RwLock::new(StateDbSync::new(mode, db)?) }) } /// Add a new non-canonical block. - pub fn insert_block(&self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { + pub fn insert_block( + &self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + ) -> Result, Error> { self.db.write().insert_block(hash, number, parent_hash, changeset) } /// Finalize a previously inserted block. 
- pub fn canonicalize_block(&self, hash: &BlockHash) -> Result, Error> { + pub fn canonicalize_block( + &self, + hash: &BlockHash, + ) -> Result, Error> { self.db.write().canonicalize_block(hash) } @@ -466,6 +507,11 @@ impl StateDb { pub fn revert_pending(&self) { self.db.write().revert_pending(); } + + /// Returns the current memory statistics of this instance. + pub fn memory_info(&self) -> StateDbMemoryInfo { + self.db.read().memory_info() + } } #[cfg(test)] diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index db2f58fa8981d2231a750f8c68472c92f278ff87..6a34523b66fff9e510518a4db4a884b5a693daf4 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -30,6 +30,7 @@ const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; /// See module documentation. +#[derive(parity_util_mem_derive::MallocSizeOf)] pub struct NonCanonicalOverlay { last_canonicalized: Option<(BlockHash, u64)>, levels: VecDeque>>, @@ -55,6 +56,7 @@ fn to_journal_key(block: u64, index: u64) -> Vec { } #[cfg_attr(test, derive(PartialEq, Debug))] +#[derive(parity_util_mem_derive::MallocSizeOf)] struct BlockOverlay { hash: BlockHash, journal_key: Vec, @@ -99,8 +101,10 @@ fn discard_descendants( let mut discarded = Vec::new(); if let Some(level) = levels.get_mut(index) { *level = level.drain(..).filter_map(|overlay| { - let parent = parents.get(&overlay.hash).expect("there is a parent entry for each entry in levels; qed").clone(); - if parent == *hash { + let parent = parents.get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed"); + + if parent == hash { discarded.push(overlay.hash.clone()); if pinned.contains_key(&overlay.hash) { // save to be discarded later. @@ -375,7 +379,7 @@ impl NonCanonicalOverlay { None } - /// Check if the block is in the canonicalization queue. + /// Check if the block is in the canonicalization queue. pub fn have_block(&self, hash: &BlockHash) -> bool { (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) && !self.pending_canonicalizations.contains(hash) @@ -436,7 +440,7 @@ impl NonCanonicalOverlay { while let Some(hash) = parent { let refs = self.pinned.entry(hash.clone()).or_default(); if *refs == 0 { - trace!(target: "state-db", "Pinned non-canon block: {:?}", hash); + trace!(target: "state-db-pin", "Pinned non-canon block: {:?}", hash); } *refs += 1; parent = self.parents.get(hash); @@ -455,7 +459,7 @@ impl NonCanonicalOverlay { if *entry.get() == 0 { entry.remove(); if let Some(inserted) = self.pinned_insertions.remove(&hash) { - trace!(target: "state-db", "Discarding unpinned non-canon block: {:?}", hash); + trace!(target: "state-db-pin", "Discarding unpinned non-canon block: {:?}", hash); discard_values(&mut self.values, inserted); self.parents.remove(&hash); } diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 71d018087b5903cfdb8ccc24999612fcfb12c02f..6cf5f260060f500432ff306eb2e3f3c6a69f9089 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -31,6 +31,7 @@ const LAST_PRUNED: &[u8] = b"last_pruned"; const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; /// See module documentation. +#[derive(parity_util_mem_derive::MallocSizeOf)] pub struct RefWindow { /// A queue of keys that should be deleted for each block in the pruning window. 
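The memory accounting introduced above (the new `memory_info()` methods plus the `MallocSizeOf` derives on the overlay and pruning structures) is built on `parity-util-mem`. As a rough, self-contained sketch of that mechanism only, not code from this change, deriving `MallocSizeOf` and measuring a value with `malloc_size` looks roughly like the following; the exact byte counts depend on which estimation feature of the crate is enabled.

// Standalone illustration of the parity-util-mem mechanism; not part of this diff.
use parity_util_mem::malloc_size;
use std::collections::HashMap;

// Same derive path as the `noncanonical.rs` / `pruning.rs` structs above.
#[derive(parity_util_mem_derive::MallocSizeOf, Default)]
struct Overlay {
    // Heap-owning fields are what the measurement walks over.
    values: HashMap<Vec<u8>, Vec<u8>>,
}

fn main() {
    let mut overlay = Overlay::default();
    overlay.values.insert(b"key".to_vec(), vec![0u8; 1024]);
    // Heap bytes reachable from `overlay`; the figure is allocator/feature dependent.
    println!("overlay uses ~{} bytes", malloc_size(&overlay));
}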
death_rows: VecDeque>, @@ -46,7 +47,7 @@ pub struct RefWindow { pending_prunings: usize, } -#[derive(Debug, PartialEq, Eq)] +#[derive(Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)] struct DeathRow { hash: BlockHash, journal_key: Vec, diff --git a/client/telemetry/Cargo.toml b/client/telemetry/Cargo.toml index 10f4b188af6b994f5f7297022ae84c53db5d9a30..248c90ec68623f0320815fbf25511d07304dc31d 100644 --- a/client/telemetry/Cargo.toml +++ b/client/telemetry/Cargo.toml @@ -1,10 +1,14 @@ [package] name = "sc-telemetry" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] description = "Telemetry utils" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sc-telemetry" + [dependencies] bytes = "0.5" @@ -12,7 +16,7 @@ parking_lot = "0.10.0" futures = "0.3.1" futures-timer = "3.0.1" wasm-timer = "0.2.0" -libp2p = { version = "0.16.0", default-features = false, features = ["libp2p-websocket"] } +libp2p = { version = "0.16.2", default-features = false, features = ["libp2p-websocket"] } log = "0.4.8" pin-project = "0.4.6" rand = "0.7.2" diff --git a/client/tracing/Cargo.toml b/client/tracing/Cargo.toml index b476db6a0113d73167196b22f839ec2d0844e282..ad988f0607953fb921d443c15315741dd1820054 100644 --- a/client/tracing/Cargo.toml +++ b/client/tracing/Cargo.toml @@ -1,9 +1,12 @@ [package] name = "sc-tracing" -version = "2.0.0" +version = "2.0.0-alpha.3" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Instrumentation implementation for substrate." [dependencies] erased-serde = "0.3.9" @@ -14,8 +17,7 @@ serde_json = "1.0.41" slog = { version = "2.5.2", features = ["nested-values"] } tracing-core = "0.1.7" -sc-telemetry = { version = "2.0.0", path = "../telemetry" } -grafana-data-source = { version = "0.8", path = "../../utils/grafana-data-source" } +sc-telemetry = { version = "2.0.0-alpha.2", path = "../telemetry" } [dev-dependencies] tracing = "0.1.10" diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index cd301041d39af4854522e3aa49608b93b3d28bd0..c00bca9275eec7fa2bf99050b6413c99f846bc78 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -34,7 +34,7 @@ //! let span = tracing::span!(tracing::Level::INFO, "my_span_name", my_number = 10, a_key = "a value"); //! let _guard = span.enter(); //! ``` -//! Currently we provide `Log` (default), `Telemetry` and `Grafana` variants for `Receiver` +//! 
Currently we provide `Log` (default), `Telemetry` variants for `Receiver` use std::collections::HashMap; use std::fmt; @@ -53,7 +53,6 @@ use tracing_core::{ subscriber::Subscriber }; -use grafana_data_source::{self, record_metrics}; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; /// Used to configure how to receive the metrics @@ -63,8 +62,6 @@ pub enum TracingReceiver { Log, /// Output to telemetry Telemetry, - /// Output to Grafana - Grafana, } impl Default for TracingReceiver { @@ -255,7 +252,6 @@ impl ProfilingSubscriber { match self.receiver { TracingReceiver::Log => print_log(span_datum), TracingReceiver::Telemetry => send_telemetry(span_datum), - TracingReceiver::Grafana => send_grafana(span_datum), } } } @@ -291,9 +287,3 @@ fn send_telemetry(span_datum: SpanDatum) { ); } -fn send_grafana(span_datum: SpanDatum) { - let name = format!("{}::{}", span_datum.target, span_datum.name); - if let Err(e) = record_metrics!(&name => span_datum.overall_time.as_nanos(),) { - log::warn!("Unable to send metrics to grafana: {:?}", e); - } -} diff --git a/client/transaction-pool/Cargo.toml b/client/transaction-pool/Cargo.toml index 7ccc98a9c09e92f63815e76f3bddc61b8d2c35a9..3d80e06c955f75317fe03df0a8800e27fff7bf0d 100644 --- a/client/transaction-pool/Cargo.toml +++ b/client/transaction-pool/Cargo.toml @@ -1,29 +1,32 @@ [package] name = "sc-transaction-pool" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate transaction pool implementation." [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0" } +codec = { package = "parity-scale-codec", version = "1.2.0" } derive_more = "0.99.2" futures = { version = "0.3.1", features = ["compat"] } futures-diagnose = "1.0" log = "0.4.8" parking_lot = "0.10.0" wasm-timer = "0.2" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-api = { version = "2.0.0", path = "../../primitives/api" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sc-transaction-graph = { version = "2.0.0", path = "./graph" } -sp-transaction-pool = { version = "2.0.0", path = "../../primitives/transaction-pool" } -sc-client-api = { version = "2.0.0", path = "../api" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime" } +sc-transaction-graph = { version = "2.0.0-alpha.2", path = "./graph" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../primitives/transaction-pool" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../api" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } futures-timer = "2.0" parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [dev-dependencies] -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -substrate-test-runtime-transaction-pool = { version = "2.0.0", path = "../../test-utils/runtime/transaction-pool" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +substrate-test-runtime-transaction-pool = { version = "2.0.0-dev", path = 
"../../test-utils/runtime/transaction-pool" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } diff --git a/client/transaction-pool/graph/Cargo.toml b/client/transaction-pool/graph/Cargo.toml index daec970a69493a5e9f49272c5a3bd3cae0bd9954..10846acfea18825bcb2215f49e0d22c785bbaf94 100644 --- a/client/transaction-pool/graph/Cargo.toml +++ b/client/transaction-pool/graph/Cargo.toml @@ -1,9 +1,12 @@ [package] name = "sc-transaction-graph" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Generic Transaction Pool" [dependencies] derive_more = "0.99.2" @@ -12,17 +15,17 @@ log = "0.4.8" parking_lot = "0.10.0" serde = { version = "1.0.101", features = ["derive"] } wasm-timer = "0.2" -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } linked-hash-map = "0.5.2" [dev-dependencies] assert_matches = "1.3.0" -codec = { package = "parity-scale-codec", version = "1.0.0" } -substrate-test-runtime = { version = "2.0.0", path = "../../../test-utils/runtime" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +substrate-test-runtime = { version = "2.0.0-dev", path = "../../../test-utils/runtime" } criterion = "0.3" [[bench]] diff --git a/client/transaction-pool/graph/benches/basics.rs b/client/transaction-pool/graph/benches/basics.rs index 54bbe930b393bd37a94cb648272f1a66199cb691..6f5d39f09f5c540a31762cd129e89f48bb4c66d1 100644 --- a/client/transaction-pool/graph/benches/basics.rs +++ b/client/transaction-pool/graph/benches/basics.rs @@ -113,7 +113,11 @@ impl ChainApi for TestApi { } fn uxt(transfer: Transfer) -> Extrinsic { - Extrinsic::Transfer(transfer, Default::default()) + Extrinsic::Transfer { + transfer, + signature: Default::default(), + exhaust_resources_when_not_first: false, + } } fn bench_configured(pool: Pool, number: u64) { diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/graph/src/pool.rs index 12601bb8d21be1e3349cf4eb2e6879215ac983a9..f0bf17dcb8dd2ecb2091dc9a799c06e8ac90f879 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/graph/src/pool.rs @@ -524,7 +524,11 @@ mod tests { } fn uxt(transfer: Transfer) -> Extrinsic { - Extrinsic::Transfer(transfer, Default::default()) + Extrinsic::Transfer { + transfer, + signature: Default::default(), + exhaust_resources_when_not_first: false, + } } fn pool() -> Pool { diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 84e06cc33e4f5c83e001dce41d68e882fd2657c7..d14f532435f3f2fe2bdcf1a5a853013d3f00aa90 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -27,7 +27,6 @@ use 
sc_client_api::{ light::{Fetcher, RemoteCallRequest, RemoteBodyRequest}, BlockBody, }; -use sp_core::Hasher; use sp_runtime::{ generic::BlockId, traits::{self, Block as BlockT, BlockIdTo, Header as HeaderT, Hash as HashT}, transaction_validity::TransactionValidity, @@ -120,7 +119,7 @@ impl sc_transaction_graph::ChainApi for FullChainApi) -> (Self::Hash, usize) { ex.using_encoded(|x| { - (traits::HasherFor::::hash(x), x.len()) + ( as traits::Hash>::hash(x), x.len()) }) } } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index a69323553fa8f53c33021e482ac2eedfd79a881c..7ee73b862ad464bfcc869548a83e0cb1b26a6440 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -16,6 +16,7 @@ //! Substrate transaction pool implementation. +#![recursion_limit="256"] #![warn(missing_docs)] #![warn(unused_extern_crates)] diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index dbf8a2935427113916c78ceffcb8bde742da78db..b915f1fe71935e3af5f66fc9bddf788ba153e015 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -113,7 +113,9 @@ async fn batch_revalidate( } pool.validated_pool().remove_invalid(&invalid_hashes); - pool.resubmit(revalidated); + if revalidated.len() > 0 { + pool.resubmit(revalidated); + } } impl RevalidationWorker { @@ -149,6 +151,7 @@ impl RevalidationWorker { } else { for xt in &to_queue { extrinsics.remove(xt); + self.members.remove(xt); } } left -= to_queue.len(); @@ -163,6 +166,10 @@ impl RevalidationWorker { queued_exts } + fn len(&self) -> usize { + self.block_ordered.iter().map(|b| b.1.len()).sum() + } + fn push(&mut self, worker_payload: WorkerPayload) { // we don't add something that already scheduled for revalidation let transactions = worker_payload.transactions; @@ -170,7 +177,15 @@ impl RevalidationWorker { for ext_hash in transactions { // we don't add something that already scheduled for revalidation - if self.members.contains_key(&ext_hash) { continue; } + if self.members.contains_key(&ext_hash) { + log::debug!( + target: "txpool", + "[{:?}] Skipped adding for revalidation: Already there.", + ext_hash, + ); + + continue; + } self.block_ordered.entry(block_number) .and_modify(|value| { value.insert(ext_hash.clone()); }) @@ -198,7 +213,18 @@ impl RevalidationWorker { futures::select! { _ = interval.next() => { let next_batch = this.prepare_batch(); + let batch_len = next_batch.len(); + batch_revalidate(this.pool.clone(), this.api.clone(), this.best_block, next_batch).await; + + if batch_len > 0 || this.len() > 0 { + log::debug!( + target: "txpool", + "Revalidated {} transactions. Left in the queue for revalidation: {}.", + batch_len, + this.len(), + ); + } }, workload = from_queue.next() => { match workload { @@ -264,6 +290,10 @@ where /// If queue configured without background worker, this will resolve after /// revalidation is actually done. 
pub async fn revalidate_later(&self, at: NumberFor, transactions: Vec>) { + if transactions.len() > 0 { + log::debug!(target: "txpool", "Added {} transactions to revalidation queue", transactions.len()); + } + if let Some(ref to_worker) = self.background { if let Err(e) = to_worker.unbounded_send(WorkerPayload { at, transactions }) { log::warn!(target: "txpool", "Failed to update background worker: {:?}", e); diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index 37b80df9e1be42d0d9f5c12c1055e4032a4d92af..d9f54ede94a4454174acbdc99b33bf5fa5dd4b58 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -267,6 +267,34 @@ fn should_not_retain_invalid_hashes_from_retracted() { assert_eq!(pool.status().ready, 0); } +#[test] +fn should_revalidate_transaction_multiple_times() { + let xt = uxt(Alice, 209); + + let (pool, _guard) = maintained_pool(); + + block_on(pool.submit_one(&BlockId::number(0), xt.clone())).expect("1. Imported"); + assert_eq!(pool.status().ready, 1); + + pool.api.push_block(1, vec![xt.clone()]); + + // maintenance is in background + block_on(pool.maintain(block_event(1))); + block_on(futures_timer::Delay::new(BACKGROUND_REVALIDATION_INTERVAL*2)); + + block_on(pool.submit_one(&BlockId::number(0), xt.clone())).expect("1. Imported"); + assert_eq!(pool.status().ready, 1); + + pool.api.push_block(2, vec![]); + pool.api.add_invalid(&xt); + + // maintenance is in background + block_on(pool.maintain(block_event(2))); + block_on(futures_timer::Delay::new(BACKGROUND_REVALIDATION_INTERVAL*2)); + + assert_eq!(pool.status().ready, 0); +} + #[test] fn should_push_watchers_during_maintaince() { fn alice_uxt(nonce: u64) -> Extrinsic { @@ -384,7 +412,7 @@ fn fork_aware_finalization() { let mut canon_watchers = vec![]; let from_alice = uxt(Alice, 1); - let from_dave = uxt(Dave, 1); + let from_dave = uxt(Dave, 2); let from_bob = uxt(Bob, 1); let from_charlie = uxt(Charlie, 1); pool.api.increment_nonce(Alice.into()); @@ -405,6 +433,7 @@ fn fork_aware_finalization() { let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), from_alice.clone())).expect("1. Imported"); let header = pool.api.push_block(2, vec![from_alice.clone()]); canon_watchers.push((watcher, header.hash())); + assert_eq!(pool.status().ready, 1); let event = ChainEvent::NewBlock { id: BlockId::Number(2), @@ -414,6 +443,7 @@ fn fork_aware_finalization() { }; b1 = header.hash(); block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); let event = ChainEvent::Finalized { hash: b1 }; block_on(pool.maintain(event)); } @@ -423,6 +453,7 @@ fn fork_aware_finalization() { let header = pool.api.push_fork_block_with_parent(b1, vec![from_dave.clone()]); from_dave_watcher = block_on(pool.submit_and_watch(&BlockId::number(1), from_dave.clone())) .expect("1. Imported"); + assert_eq!(pool.status().ready, 1); let event = ChainEvent::NewBlock { id: BlockId::Hash(header.hash()), is_new_best: true, @@ -431,11 +462,13 @@ fn fork_aware_finalization() { }; c2 = header.hash(); block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); } // block D2 { from_bob_watcher = block_on(pool.submit_and_watch(&BlockId::number(1), from_bob.clone())).expect("1. 
Imported"); + assert_eq!(pool.status().ready, 1); let header = pool.api.push_fork_block_with_parent(c2, vec![from_bob.clone()]); let event = ChainEvent::NewBlock { @@ -446,11 +479,13 @@ fn fork_aware_finalization() { }; d2 = header.hash(); block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); } // block C1 { let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), from_charlie.clone())).expect("1.Imported"); + assert_eq!(pool.status().ready, 1); let header = pool.api.push_block(3, vec![from_charlie.clone()]); canon_watchers.push((watcher, header.hash())); @@ -461,6 +496,7 @@ fn fork_aware_finalization() { retracted: vec![c2, d2], }; block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 2); let event = ChainEvent::Finalized { hash: header.hash() }; block_on(pool.maintain(event)); } @@ -469,6 +505,7 @@ fn fork_aware_finalization() { { let xt = uxt(Eve, 0); let w = block_on(pool.submit_and_watch(&BlockId::number(1), xt.clone())).expect("1. Imported"); + assert_eq!(pool.status().ready, 3); let header = pool.api.push_block(4, vec![xt.clone()]); canon_watchers.push((w, header.hash())); @@ -480,6 +517,7 @@ fn fork_aware_finalization() { }; d1 = header.hash(); block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 2); let event = ChainEvent::Finalized { hash: d1 }; block_on(pool.maintain(event)); } @@ -488,7 +526,7 @@ fn fork_aware_finalization() { // block e1 { - let header = pool.api.push_block(5, vec![from_dave]); + let header = pool.api.push_block(5, vec![from_dave, from_bob]); e1 = header.hash(); let event = ChainEvent::NewBlock { id: BlockId::Hash(header.hash()), @@ -497,6 +535,7 @@ fn fork_aware_finalization() { retracted: vec![] }; block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); block_on(pool.maintain(ChainEvent::Finalized { hash: e1 })); } @@ -515,15 +554,8 @@ fn fork_aware_finalization() { assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2.clone()))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(c2))); - - // can be either Ready, or InBlock, depending on which event comes first - assert_eq!( - match stream.next() { - Some(TransactionStatus::Ready) => stream.next(), - val @ _ => val, - }, - Some(TransactionStatus::InBlock(e1)), - ); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); assert_eq!(stream.next(), None); } @@ -533,6 +565,10 @@ fn fork_aware_finalization() { assert_eq!(stream.next(), Some(TransactionStatus::Ready)); assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2.clone()))); assert_eq!(stream.next(), Some(TransactionStatus::Retracted(d2))); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); + assert_eq!(stream.next(), None); } } diff --git a/docs/CODEOWNERS b/docs/CODEOWNERS index 9304f278534bbe4faa0bc7523c7a27bb99781ce5..701df299a35453ce71a037150d46d329847d5f80 100644 --- a/docs/CODEOWNERS +++ b/docs/CODEOWNERS @@ -75,3 +75,6 @@ # Authority discovery /client/authority-discovery/ @mxinden /frame/authority-discovery/ @mxinden + +# Prometheus endpoint +/utils/prometheus/ @mxinden diff --git a/frame/assets/Cargo.toml b/frame/assets/Cargo.toml index 
cd6b41c4c7a3c5523b0622c968b7f1fd4b9c055b..5b0b5f096a91fdfea93956119879c8873d714de9 100644 --- a/frame/assets/Cargo.toml +++ b/frame/assets/Cargo.toml @@ -1,24 +1,27 @@ [package] name = "pallet-assets" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME asset management pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } # Needed for various traits. In our case, `OnFinalize`. -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } # Needed for type-safe access to storage DB. -frame-support = { version = "2.0.0", default-features = false, path = "../support" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } # `system` module provides us with all sorts of useful stuff and macros depend on it being around. -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-std = { version = "2.0.0", path = "../../primitives/std" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index ebd1d17bcffe4628933917ea46f5e14a9518480a..042ff89913417ac710a20f348662ea6a2d10eceb 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -295,7 +295,7 @@ mod tests { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl Trait for Test { type Event = (); diff --git a/frame/aura/Cargo.toml b/frame/aura/Cargo.toml index e982a02b47c8db39d755af9739123c4f909562a0..77abfe4ae1f48aa062c50d4d516e62dd4042631d 100644 --- a/frame/aura/Cargo.toml +++ b/frame/aura/Cargo.toml @@ -1,25 +1,28 @@ [package] name = "pallet-aura" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME AURA consensus pallet" [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = 
"1.2.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-io ={ path = "../../primitives/io", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false} -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } +pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +sp-consensus-aura = { path = "../../primitives/consensus/aura", default-features = false, version = "0.8.0-alpha.2"} +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/timestamp" } +pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } [dev-dependencies] diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 5b3ccd7745f8cc9cd261c45ebe7a8e21ca691176..05a161ee49c3d6c9ac7c51028d31f1dd9d193bc8 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -63,7 +63,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl pallet_timestamp::Trait for Test { diff --git a/frame/authority-discovery/Cargo.toml b/frame/authority-discovery/Cargo.toml index d8c72683cac786067793c4883d167f567dc15c18..c82d1eab588b39fcf27f5924ca5471a8b4b616fd 100644 --- a/frame/authority-discovery/Cargo.toml +++ b/frame/authority-discovery/Cargo.toml @@ -1,25 +1,28 @@ [package] name = "pallet-authority-discovery" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for authority discovery" [dependencies] -sp-authority-discovery = { version = "2.0.0", default-features = false, path = "../../primitives/authority-discovery" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-authority-discovery = 
{ version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/authority-discovery" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -pallet-session = { version = "2.0.0", features = ["historical" ], path = "../session", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +pallet-session = { version = "2.0.0-alpha.2", features = ["historical" ], path = "../session", default-features = false } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } +sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } [features] default = ["std"] diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index 77906e1bfe34f7e12bf9c7cb03978f71128906bb..8ee4931e488c3740174706480188dfbbdd5b1857 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -159,7 +159,7 @@ mod tests { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl_outer_origin! 
{ diff --git a/frame/authorship/Cargo.toml b/frame/authorship/Cargo.toml index 61056820f43f107aaacf5f8707603c8395542913..2a01cdbd8ad85b22245bf68b8505b4c8be846d6c 100644 --- a/frame/authorship/Cargo.toml +++ b/frame/authorship/Cargo.toml @@ -1,21 +1,23 @@ [package] name = "pallet-authorship" -version = "2.0.0" +version = "2.0.0-alpha.3" description = "Block and Uncle Author tracking for the FRAME" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-authorship = { version = "2.0.0", default-features = false, path = "../../primitives/authorship" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-io ={ path = "../../primitives/io", default-features = false } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +sp-authorship = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/authorship" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} impl-trait-for-tuples = "0.1.3" [features] diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index 335e13a7fd71b17aac8750de5e4f7bfd1e1eb2ad..d3c1bf752aeb8a197333946e3d4d4f8a751b765f 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -433,7 +433,7 @@ mod tests { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } parameter_types! { diff --git a/frame/babe/Cargo.toml b/frame/babe/Cargo.toml index 5e7764862a66b4c58f5b9fcb57e58a6325d97659..7e39e5bed5c29a607ef3faf077ab1964d3921c57 100644 --- a/frame/babe/Cargo.toml +++ b/frame/babe/Cargo.toml @@ -1,32 +1,35 @@ [package] name = "pallet-babe" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Consensus extension module for BABE consensus. Collects on-chain randomness from VRF outputs and manages epoch transitions." 
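Several of the mock runtimes above replace `type OnReapAccount` with `type OnKilledAccount`. Purely as a hedged sketch of the renamed hook, assuming the handler trait lives in `frame_support::traits` and keeps a single `on_killed_account` method (the mocks in this change simply plug in `()`):

// Hedged sketch only; the trait path and method name are assumed, not taken from this diff.
use frame_support::traits::OnKilledAccount;

pub struct CleanupOnKill;

impl<AccountId> OnKilledAccount<AccountId> for CleanupOnKill {
    fn on_killed_account(_who: &AccountId) {
        // Remove any per-account state a pallet keeps for `_who`, emit an event, etc.
    }
}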
[dependencies] hex-literal = "0.2.1" -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -sp-consensus-babe = { version = "0.8", default-features = false, path = "../../primitives/consensus/babe" } -sp-io ={ path = "../../primitives/io", default-features = false } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } +sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/timestamp" } +pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } +sp-consensus-babe = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/consensus/babe" } +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} [dev-dependencies] lazy_static = "1.4.0" parking_lot = "0.10.0" -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -substrate-test-runtime = { version = "2.0.0", path = "../../test-utils/runtime" } +sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/version" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +substrate-test-runtime = { version = "2.0.0-dev", path = "../../test-utils/runtime" } [features] default = ["std"] diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index aba3a75eb970d6a6ec2bd741387a8c80785303b1..4dc9304fa8467f7e2bf36dcda500fb8fda37962e 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -248,7 +248,6 @@ impl pallet_session::ShouldEndSession for Module { /// A BABE equivocation offence report. /// /// When a validator released two or more blocks at the same slot. -#[allow(dead_code)] struct BabeEquivocationOffence { /// A babe slot number in which this incident happened. 
slot: u64, diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index efb5570c1db7b3d9da7fe95db6ad8799e153d4f2..2ec083728e82ceb8488fa9c24efb7223ae5f450e 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -15,17 +15,15 @@ // along with Substrate. If not, see . //! Test utilities -#![allow(dead_code, unused_imports)] use super::{Trait, Module, GenesisConfig}; -use sp_consensus_babe::AuthorityId; use sp_runtime::{ - traits::IdentityLookup, Perbill, PerThing, testing::{Header, UintAuthorityId}, impl_opaque_keys, + traits::IdentityLookup, Perbill, testing::{Header, UintAuthorityId}, impl_opaque_keys, }; use sp_version::RuntimeVersion; use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; use sp_io; -use sp_core::{H256, Blake2Hasher}; +use sp_core::H256; impl_outer_origin!{ pub enum Origin for Test where system = frame_system {} @@ -68,7 +66,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl_opaque_keys! { diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 976a264d7ba7bb24f29c556ec92822a728598075..84f8166b10431a9d107cf82d831d4aaa3230dfe1 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -17,7 +17,7 @@ //! Consensus extension module tests for BABE consensus. use super::*; -use mock::{new_test_ext, Babe, Test}; +use mock::{new_test_ext, Babe, System}; use sp_runtime::{traits::OnFinalize, testing::{Digest, DigestItem}}; use pallet_session::ShouldEndSession; @@ -66,8 +66,6 @@ fn check_module() { }) } -type System = frame_system::Module; - #[test] fn first_block_epoch_zero_start() { new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { diff --git a/frame/balances/Cargo.toml b/frame/balances/Cargo.toml index f2bf1069afb1dbbd9a93d17c56d597ce92734972..6bb551c9c6fea03a747e700e5aac6f0d8156f300 100644 --- a/frame/balances/Cargo.toml +++ b/frame/balances/Cargo.toml @@ -1,24 +1,26 @@ [package] name = "pallet-balances" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to manage balances" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../benchmarking", optional = true } +frame-support 
= { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-transaction-payment = { version = "2.0.0", path = "../transaction-payment" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../transaction-payment" } [features] default = ["std"] @@ -32,3 +34,4 @@ std = [ "frame-support/std", "frame-system/std", ] +runtime-benchmarks = ["frame-benchmarking"] diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index e564824aaa3902dc28482d855d8fb4c674ded57e..2473ce292004754574a96be0d07c3d2af3c038a5 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -19,305 +19,101 @@ use super::*; use frame_system::RawOrigin; -use sp_io::hashing::blake2_256; -use frame_benchmarking::{ - BenchmarkResults, BenchmarkParameter, Benchmarking, BenchmarkingSetup, benchmarking, -}; -use sp_runtime::traits::{Bounded, Dispatchable}; +use frame_benchmarking::{benchmarks, account}; +use sp_runtime::traits::Bounded; use crate::Module as Balances; -// Support Functions -fn account(name: &'static str, index: u32) -> T::AccountId { - let entropy = (name, index).using_encoded(blake2_256); - T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() -} +const SEED: u32 = 0; +const MAX_EXISTENTIAL_DEPOSIT: u32 = 1000; +const MAX_USER_INDEX: u32 = 1000; -// Benchmark `transfer` extrinsic with the worst possible conditions: -// * Transfer will kill the sender account. -// * Transfer will create the recipient account. -struct Transfer; -impl BenchmarkingSetup, RawOrigin> for Transfer { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Existential Deposit Multiplier - (BenchmarkParameter::E, 2, 1000), - // User Seed - (BenchmarkParameter::U, 1, 1000), - ] +benchmarks! { + _ { + let e in 2 .. MAX_EXISTENTIAL_DEPOSIT => (); + let u in 1 .. MAX_USER_INDEX => (); } - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // Constants - let ed = T::ExistentialDeposit::get(); + // Benchmark `transfer` extrinsic with the worst possible conditions: + // * Transfer will kill the sender account. + // * Transfer will create the recipient account. + transfer { + let u in ...; + let e in ...; - // Select an account - let u = components.iter().find(|&c| c.0 == BenchmarkParameter::U).unwrap().1; - let user = account::("user", u); - let user_origin = RawOrigin::Signed(user.clone()); + let existential_deposit = T::ExistentialDeposit::get(); + let caller = account("caller", u, SEED); // Give some multiple of the existential deposit + creation fee + transfer fee - let e = components.iter().find(|&c| c.0 == BenchmarkParameter::E).unwrap().1; - let balance = ed.saturating_mul(e.into()); - let _ = as Currency<_>>::make_free_balance_be(&user, balance); + let balance = existential_deposit.saturating_mul(e.into()); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. 
- let recipient = account::("recipient", u); + let recipient = account("recipient", u, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient); - let transfer_amt = ed.saturating_mul((e - 1).into()) + 1.into(); + let transfer_amount = existential_deposit.saturating_mul((e - 1).into()) + 1.into(); + }: _(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) - // Return the `transfer` call - Ok((crate::Call::::transfer(recipient_lookup, transfer_amt), user_origin)) - } -} - -// Benchmark `transfer` with the best possible condition: -// * Both accounts exist and will continue to exist. -struct TransferBestCase; -impl BenchmarkingSetup, RawOrigin> for TransferBestCase { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Existential Deposit Multiplier - (BenchmarkParameter::E, 2, 1000), - // User Seed - (BenchmarkParameter::U, 1, 1000), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // Constants - let ed = T::ExistentialDeposit::get(); - - // Select a sender - let u = components.iter().find(|&c| c.0 == BenchmarkParameter::U).unwrap().1; - let user = account::("user", u); - let user_origin = RawOrigin::Signed(user.clone()); + // Benchmark `transfer` with the best possible condition: + // * Both accounts exist and will continue to exist. + transfer_best_case { + let u in ...; + let e in ...; - // Select a recipient - let recipient = account::("recipient", u); + let caller = account("caller", u, SEED); + let recipient: T::AccountId = account("recipient", u, SEED); let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); - // Get the existential deposit multiplier - let e = components.iter().find(|&c| c.0 == BenchmarkParameter::E).unwrap().1; - // Give the sender account max funds for transfer (their account will never reasonably be killed). - let _ = as Currency<_>>::make_free_balance_be(&user, T::Balance::max_value()); + let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); // Give the recipient account existential deposit (thus their account already exists). - let _ = as Currency<_>>::make_free_balance_be(&recipient, ed); - - // Transfer e * existential deposit. - let transfer_amt = ed.saturating_mul(e.into()); - - // Return the `transfer` call - Ok((crate::Call::::transfer(recipient_lookup, transfer_amt), user_origin)) - } -} - -// Benchmark `transfer_keep_alive` with the worst possible condition: -// * The recipient account is created. 
-struct TransferKeepAlive; -impl BenchmarkingSetup, RawOrigin> for TransferKeepAlive { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Existential Deposit Multiplier - (BenchmarkParameter::E, 2, 1000), - // User Seed - (BenchmarkParameter::U, 1, 1000), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // Constants - let ed = T::ExistentialDeposit::get(); - - // Select a sender - let u = components.iter().find(|&c| c.0 == BenchmarkParameter::U).unwrap().1; - let user = account::("user", u); - let user_origin = RawOrigin::Signed(user.clone()); - - // Select a recipient - let recipient = account::("recipient", u); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); - - // Get the existential deposit multiplier - let e = components.iter().find(|&c| c.0 == BenchmarkParameter::E).unwrap().1; + let existential_deposit = T::ExistentialDeposit::get(); + let _ = as Currency<_>>::make_free_balance_be(&recipient, existential_deposit); + let transfer_amount = existential_deposit.saturating_mul(e.into()); + }: transfer(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) + + // Benchmark `transfer_keep_alive` with the worst possible condition: + // * The recipient account is created. + transfer_keep_alive { + let u in ...; + let e in ...; + + let caller = account("caller", u, SEED); + let recipient = account("recipient", u, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient); // Give the sender account max funds, thus a transfer will not kill account. - let _ = as Currency<_>>::make_free_balance_be(&user, T::Balance::max_value()); + let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); + let existential_deposit = T::ExistentialDeposit::get(); + let transfer_amount = existential_deposit.saturating_mul(e.into()); + }: _(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) - // Transfer e * existential deposit. - let transfer_amt = ed.saturating_mul(e.into()); + // Benchmark `set_balance` coming from ROOT account. This always creates an account. + set_balance { + let u in ...; + let e in ...; - // Return the `transfer_keep_alive` call - Ok((crate::Call::::transfer_keep_alive(recipient_lookup, transfer_amt), user_origin)) - } -} - -// Benchmark `set_balance` coming from ROOT account. This always creates an account. -struct SetBalance; -impl BenchmarkingSetup, RawOrigin> for SetBalance { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Existential Deposit Multiplier - (BenchmarkParameter::E, 2, 1000), - // User Seed - (BenchmarkParameter::U, 1, 1000), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // Constants - let ed = T::ExistentialDeposit::get(); - - // Select a sender - let u = components.iter().find(|&c| c.0 == BenchmarkParameter::U).unwrap().1; - let user = account::("user", u); + let user: T::AccountId = account("user", u, SEED); let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); - // Get the existential deposit multiplier for free and reserved - let e = components.iter().find(|&c| c.0 == BenchmarkParameter::E).unwrap().1; - let balance_amt = ed.saturating_mul(e.into()); - - // Return the `set_balance` call - Ok((crate::Call::::set_balance(user_lookup, balance_amt, balance_amt), RawOrigin::Root)) - } -} - -// Benchmark `set_balance` coming from ROOT account. 
This always kills an account. -struct SetBalanceKilling; -impl BenchmarkingSetup, RawOrigin> for SetBalanceKilling { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Existential Deposit Multiplier - (BenchmarkParameter::E, 2, 1000), - // User Seed - (BenchmarkParameter::U, 1, 1000), - ] - } + // Give the user some initial balance. + let existential_deposit = T::ExistentialDeposit::get(); + let balance_amount = existential_deposit.saturating_mul(e.into()); + let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); + }: _(RawOrigin::Root, user_lookup, balance_amount, balance_amount) - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // Constants - let ed = T::ExistentialDeposit::get(); + // Benchmark `set_balance` coming from ROOT account. This always kills an account. + set_balance_killing { + let u in ...; + let e in ...; - // Select a sender - let u = components.iter().find(|&c| c.0 == BenchmarkParameter::U).unwrap().1; - let user = account::("user", u); + let user: T::AccountId = account("user", u, SEED); let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); - // Get the existential deposit multiplier for free and reserved - let e = components.iter().find(|&c| c.0 == BenchmarkParameter::E).unwrap().1; - // Give the user some initial balance - let balance_amt = ed.saturating_mul(e.into()); - let _ = as Currency<_>>::make_free_balance_be(&user, balance_amt); - - // Return the `set_balance` call that will kill the account - Ok((crate::Call::::set_balance(user_lookup, 0.into(), 0.into()), RawOrigin::Root)) - } -} - -// The list of available benchmarks for this pallet. -enum SelectedBenchmark { - Transfer, - TransferBestCase, - TransferKeepAlive, - SetBalance, - SetBalanceKilling, -} - -// Allow us to select a benchmark from the list of available benchmarks. -impl BenchmarkingSetup, RawOrigin> for SelectedBenchmark { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - match self { - Self::Transfer => , RawOrigin>>::components(&Transfer), - Self::TransferBestCase => , RawOrigin>>::components(&TransferBestCase), - Self::TransferKeepAlive => , RawOrigin>>::components(&TransferKeepAlive), - Self::SetBalance => , RawOrigin>>::components(&SetBalance), - Self::SetBalanceKilling => , RawOrigin>>::components(&SetBalanceKilling), - } - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - match self { - Self::Transfer => , RawOrigin>>::instance(&Transfer, components), - Self::TransferBestCase => , RawOrigin>>::instance(&TransferBestCase, components), - Self::TransferKeepAlive => , RawOrigin>>::instance(&TransferKeepAlive, components), - Self::SetBalance => , RawOrigin>>::instance(&SetBalance, components), - Self::SetBalanceKilling => , RawOrigin>>::instance(&SetBalanceKilling, components), - } - } -} - -impl Benchmarking for Module { - fn run_benchmark(extrinsic: Vec, steps: u32, repeat: u32) -> Result, &'static str> { - // Map the input to the selected benchmark. 
- let selected_benchmark = match extrinsic.as_slice() { - b"transfer" => SelectedBenchmark::Transfer, - b"transfer_best_case" => SelectedBenchmark::TransferBestCase, - b"transfer_keep_alive" => SelectedBenchmark::TransferKeepAlive, - b"set_balance" => SelectedBenchmark::SetBalance, - b"set_balance_killing" => SelectedBenchmark::SetBalanceKilling, - _ => return Err("Could not find extrinsic."), - }; - - // Warm up the DB - benchmarking::commit_db(); - benchmarking::wipe_db(); - - let components = , RawOrigin>>::components(&selected_benchmark); - // results go here - let mut results: Vec = Vec::new(); - // Select the component we will be benchmarking. Each component will be benchmarked. - for (name, low, high) in components.iter() { - // Create up to `STEPS` steps for that component between high and low. - let step_size = ((high - low) / steps).max(1); - let num_of_steps = (high - low) / step_size; - for s in 0..num_of_steps { - // This is the value we will be testing for component `name` - let component_value = low + step_size * s; - - // Select the mid value for all the other components. - let c: Vec<(BenchmarkParameter, u32)> = components.iter() - .map(|(n, l, h)| - (*n, if n == name { component_value } else { (h - l) / 2 + l }) - ).collect(); - - // Run the benchmark `repeat` times. - for _r in 0..repeat { - // Set up the externalities environment for the setup we want to benchmark. - let (call, caller) = , RawOrigin>>::instance(&selected_benchmark, &c)?; - // Commit the externalities to the database, flushing the DB cache. - // This will enable worst case scenario for reading from the database. - benchmarking::commit_db(); - // Run the benchmark. - let start = benchmarking::current_time(); - call.dispatch(caller.clone().into())?; - let finish = benchmarking::current_time(); - let elapsed = finish - start; - sp_std::if_std!{ - if let RawOrigin::Signed(who) = caller.clone() { - let balance = Account::::get(&who).free; - println!("Free Balance {:?}", balance); - } - } - results.push((c.clone(), elapsed)); - // Wipe the DB back to the genesis state. - benchmarking::wipe_db(); - } - } - } - return Ok(results); - } -} + // Give the user some initial balance. 
+ let existential_deposit = T::ExistentialDeposit::get(); + let balance_amount = existential_deposit.saturating_mul(e.into()); + let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); + }: set_balance(RawOrigin::Root, user_lookup, 0.into(), 0.into()) +} \ No newline at end of file diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 0f5e8b40d5efa3f0c257262a8cbcc8b55cb06304..f91c9e8229050cfd9182d2fe55f15de9133b0db2 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -155,15 +155,18 @@ mod tests_composite; #[cfg(test)] #[macro_use] mod tests; +#[cfg(feature = "runtime-benchmarks")] mod benchmarking; +mod migration; use sp_std::prelude::*; use sp_std::{cmp, result, mem, fmt::Debug, ops::BitOr, convert::Infallible}; +use sp_io::hashing::twox_64; use codec::{Codec, Encode, Decode}; use frame_support::{ StorageValue, Parameter, decl_event, decl_storage, decl_module, decl_error, ensure, weights::SimpleDispatchInfo, traits::{ - Currency, OnReapAccount, OnUnbalanced, TryDrop, StoredMap, + Currency, OnKilledAccount, OnUnbalanced, TryDrop, StoredMap, WithdrawReason, WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::KeepAlive, ExistenceRequirement::AllowDeath, IsDeadAccount, BalanceStatus as Status @@ -178,7 +181,7 @@ use sp_runtime::{ }; use frame_system::{self as system, ensure_signed, ensure_root}; use frame_support::storage::migration::{ - get_storage_value, take_storage_value, put_storage_value, StorageIterator + get_storage_value, take_storage_value, put_storage_value, StorageIterator, have_storage_value }; pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; @@ -346,6 +349,21 @@ impl AccountData { } } +// A value placed in storage that represents the current version of the Balances storage. +// This value is used by the `on_runtime_upgrade` logic to determine whether we run +// storage migration logic. This should match directly with the semantic versions of the Rust crate. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] +enum Releases { + V1_0_0, + V2_0_0, +} + +impl Default for Releases { + fn default() -> Self { + Releases::V1_0_0 + } +} + decl_storage! { trait Store for Module, I: Instance=DefaultInstance> as Balances { /// The total units issued in the system. @@ -365,10 +383,10 @@ decl_storage! { /// NOTE: Should only be accessed when setting, changing and freeing a lock. pub Locks get(fn locks): map hasher(blake2_256) T::AccountId => Vec>; - /// True if network has been upgraded to this version. + /// Storage version of the pallet. /// - /// True for new networks. - IsUpgraded build(|_: &GenesisConfig| true): bool; + /// This is set to v2.0.0 for new networks. + StorageVersion build(|_: &GenesisConfig| Releases::V2_0_0): Releases; } add_extra_genesis { config(balances): Vec<(T::AccountId, T::Balance)>; @@ -516,11 +534,8 @@ decl_module! { >::transfer(&transactor, &dest, value, KeepAlive)?; } - fn on_initialize() { - if !IsUpgraded::::get() { - IsUpgraded::::put(true); - Self::do_upgrade(); - } + fn on_runtime_upgrade() { + migration::on_runtime_upgrade::(); } } } @@ -546,73 +561,6 @@ impl OldBalanceLock { impl, I: Instance> Module { // PRIVATE MUTABLES - // Upgrade from the pre-#4649 balances/vesting into the new balances. - pub fn do_upgrade() { - sp_runtime::print("Upgrading account balances..."); - // First, migrate from old FreeBalance to new Account. 
- // We also move all locks across since only accounts with FreeBalance values have locks. - // FreeBalance: map T::AccountId => T::Balance - for (hash, free) in StorageIterator::::new(b"Balances", b"FreeBalance").drain() { - let mut account = AccountData { free, ..Default::default() }; - // Locks: map T::AccountId => Vec - let old_locks = get_storage_value::>>(b"Balances", b"Locks", &hash); - if let Some(locks) = old_locks { - let locks = locks.into_iter() - .map(|i| { - let (result, expiry) = i.upgraded(); - if expiry != T::BlockNumber::max_value() { - // Any `until`s that are not T::BlockNumber::max_value come from - // democracy and need to be migrated over there. - // Democracy: Locks get(locks): map T::AccountId => Option; - put_storage_value(b"Democracy", b"Locks", &hash, expiry); - } - result - }) - .collect::>(); - for l in locks.iter() { - if l.reasons == Reasons::All || l.reasons == Reasons::Misc { - account.misc_frozen = account.misc_frozen.max(l.amount); - } - if l.reasons == Reasons::All || l.reasons == Reasons::Fee { - account.fee_frozen = account.fee_frozen.max(l.amount); - } - } - put_storage_value(b"Balances", b"Locks", &hash, locks); - } - put_storage_value(b"Balances", b"Account", &hash, account); - } - // Second, migrate old ReservedBalance into new Account. - // ReservedBalance: map T::AccountId => T::Balance - for (hash, reserved) in StorageIterator::::new(b"Balances", b"ReservedBalance").drain() { - let mut account = get_storage_value::>(b"Balances", b"Account", &hash).unwrap_or_default(); - account.reserved = reserved; - put_storage_value(b"Balances", b"Account", &hash, account); - } - - // Finally, migrate vesting and ensure locks are in place. We will be lazy and just lock - // for the maximum amount (i.e. at genesis). Users will need to call "vest" to reduce the - // lock to something sensible. - // pub Vesting: map T::AccountId => Option; - for (hash, vesting) in StorageIterator::<(T::Balance, T::Balance, T::BlockNumber)>::new(b"Balances", b"Vesting").drain() { - let mut account = get_storage_value::>(b"Balances", b"Account", &hash).unwrap_or_default(); - let mut locks = get_storage_value::>>(b"Balances", b"Locks", &hash).unwrap_or_default(); - locks.push(BalanceLock { - id: *b"vesting ", - amount: vesting.0.clone(), - reasons: Reasons::Misc, - }); - account.misc_frozen = account.misc_frozen.max(vesting.0.clone()); - put_storage_value(b"Vesting", b"Vesting", &hash, vesting); - put_storage_value(b"Balances", b"Locks", &hash, locks); - put_storage_value(b"Balances", b"Account", &hash, account); - } - - for (hash, balances) in StorageIterator::>::new(b"Balances", b"Account").drain() { - let nonce = take_storage_value::(b"System", b"AccountNonce", &hash).unwrap_or_default(); - put_storage_value(b"System", b"Account", &hash, (nonce, balances)); - } - } - /// Get the free balance of an account. 
pub fn free_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { Self::account(who.borrow()).free @@ -721,14 +669,21 @@ impl, I: Instance> Module { } } }); - Locks::::insert(who, locks); - } -} -impl, I: Instance> OnReapAccount for Module { - fn on_reap_account(who: &T::AccountId) { - Locks::::remove(who); - Account::::remove(who); + let existed = Locks::::contains_key(who); + if locks.is_empty() { + Locks::::remove(who); + if existed { + // TODO: use Locks::::hashed_key + // https://github.com/paritytech/substrate/issues/4969 + system::Module::::dec_ref(who); + } + } else { + Locks::::insert(who, locks); + if !existed { + system::Module::::inc_ref(who); + } + } } } @@ -923,7 +878,7 @@ impl, I: Instance> frame_system::Trait for ElevatedTrait { type Version = T::Version; type ModuleToIndex = T::ModuleToIndex; type OnNewAccount = T::OnNewAccount; - type OnReapAccount = T::OnReapAccount; + type OnKilledAccount = T::OnKilledAccount; type AccountData = T::AccountData; } impl, I: Instance> Trait for ElevatedTrait { @@ -1040,6 +995,7 @@ impl, I: Instance> Currency for Module where )?; let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; + let allow_death = allow_death && system::Module::::allow_death(transactor); ensure!(allow_death || from_account.free >= ed, Error::::KeepAlive); Ok(()) @@ -1283,6 +1239,17 @@ impl, I: Instance> ReservableCurrency for Module } } +/// Implement `OnKilledAccount` to remove the local account, if using local account storage. +/// +/// NOTE: You probably won't need to use this! This only needs to be "wired in" to System module +/// if you're using the local balance storage. **If you're using the composite system account +/// storage (which is the default in most examples and tests) then there's no need.** +impl, I: Instance> OnKilledAccount for Module { + fn on_killed_account(who: &T::AccountId) { + Account::::remove(who); + } +} + impl, I: Instance> LockableCurrency for Module where T::Balance: MaybeSerializeDeserialize + Debug diff --git a/frame/balances/src/migration.rs b/frame/balances/src/migration.rs new file mode 100644 index 0000000000000000000000000000000000000000..16c764d59f1aa4f5e73812ab028141bbcd985405 --- /dev/null +++ b/frame/balances/src/migration.rs @@ -0,0 +1,88 @@ +use super::*; + +pub fn on_runtime_upgrade, I: Instance>() { + match StorageVersion::::get() { + Releases::V2_0_0 => return, + Releases::V1_0_0 => upgrade_v1_to_v2::(), + } +} + +// Upgrade from the pre-#4649 balances/vesting into the new balances. +fn upgrade_v1_to_v2, I: Instance>() { + sp_runtime::print("Upgrading account balances..."); + // First, migrate from old FreeBalance to new Account. + // We also move all locks across since only accounts with FreeBalance values have locks. + // FreeBalance: map T::AccountId => T::Balance + for (hash, free) in StorageIterator::::new(b"Balances", b"FreeBalance").drain() { + let mut account = AccountData { free, ..Default::default() }; + // Locks: map T::AccountId => Vec + let old_locks = get_storage_value::>>(b"Balances", b"Locks", &hash); + if let Some(locks) = old_locks { + let locks = locks.into_iter() + .map(|i| { + let (result, expiry) = i.upgraded(); + if expiry != T::BlockNumber::max_value() { + // Any `until`s that are not T::BlockNumber::max_value come from + // democracy and need to be migrated over there. 
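The `Locks` change above ties the pallet's per-account lock storage to the system reference counter: storing the first lock for an account calls `inc_ref`, and removing the last one calls `dec_ref`, so an account is only reaped once no pallet still holds data for it. A minimal standalone sketch of that rule, for orientation only (the helper name `update_lock_refcount` is invented here and is not part of the diff):

// Illustrative sketch, not the pallet's code: the bookkeeping rule the `Locks` update
// above follows when it calls `frame_system`'s `inc_ref`/`dec_ref`.
fn update_lock_refcount(existed_before: bool, is_empty_now: bool, refs: &mut u32) {
	if !existed_before && !is_empty_now {
		// First lock stored for this account: keep the account alive while it exists.
		*refs += 1;
	} else if existed_before && is_empty_now {
		// Last lock removed: drop our claim so the account may be reaped.
		*refs = refs.saturating_sub(1);
	}
	// Otherwise nothing changes: we either still hold locks or never held any.
}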
+ // Democracy: Locks get(locks): map T::AccountId => Option; + put_storage_value(b"Democracy", b"Locks", &hash, expiry); + } + result + }) + .collect::>(); + for l in locks.iter() { + if l.reasons == Reasons::All || l.reasons == Reasons::Misc { + account.misc_frozen = account.misc_frozen.max(l.amount); + } + if l.reasons == Reasons::All || l.reasons == Reasons::Fee { + account.fee_frozen = account.fee_frozen.max(l.amount); + } + } + put_storage_value(b"Balances", b"Locks", &hash, locks); + } + put_storage_value(b"Balances", b"Account", &hash, account); + } + // Second, migrate old ReservedBalance into new Account. + // ReservedBalance: map T::AccountId => T::Balance + for (hash, reserved) in StorageIterator::::new(b"Balances", b"ReservedBalance").drain() { + let mut account = get_storage_value::>(b"Balances", b"Account", &hash).unwrap_or_default(); + account.reserved = reserved; + put_storage_value(b"Balances", b"Account", &hash, account); + } + + // Finally, migrate vesting and ensure locks are in place. We will be lazy and just lock + // for the maximum amount (i.e. at genesis). Users will need to call "vest" to reduce the + // lock to something sensible. + // pub Vesting: map T::AccountId => Option; + for (hash, vesting) in StorageIterator::<(T::Balance, T::Balance, T::BlockNumber)>::new(b"Balances", b"Vesting").drain() { + let mut account = get_storage_value::>(b"Balances", b"Account", &hash).unwrap_or_default(); + let mut locks = get_storage_value::>>(b"Balances", b"Locks", &hash).unwrap_or_default(); + locks.push(BalanceLock { + id: *b"vesting ", + amount: vesting.0.clone(), + reasons: Reasons::Misc, + }); + account.misc_frozen = account.misc_frozen.max(vesting.0.clone()); + put_storage_value(b"Vesting", b"Vesting", &hash, vesting); + put_storage_value(b"Balances", b"Locks", &hash, locks); + put_storage_value(b"Balances", b"Account", &hash, account); + } + + for (hash, balances) in StorageIterator::>::new(b"Balances", b"Account").drain() { + let nonce = take_storage_value::(b"System", b"AccountNonce", &hash).unwrap_or_default(); + let mut refs: system::RefCount = 0; + // The items in Kusama that would result in a ref count being incremented. + if have_storage_value(b"Democracy", b"Proxy", &hash) { refs += 1 } + // We skip Recovered since it's being replaced anyway. + let mut prefixed_hash = twox_64(&b":session:keys"[..]).to_vec(); + prefixed_hash.extend(&b":session:keys"[..]); + prefixed_hash.extend(&hash[..]); + if have_storage_value(b"Session", b"NextKeys", &prefixed_hash) { refs += 1 } + if have_storage_value(b"Staking", b"Bonded", &hash) { refs += 1 } + put_storage_value(b"System", b"Account", &hash, (nonce, refs, &balances)); + } + + take_storage_value::(b"Balances", b"IsUpgraded", &[]); + + StorageVersion::::put(Releases::V2_0_0); +} diff --git a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 3d0c9e9207f5d82faf9b58ee3958fa11fdaf3b4c..98c7c856bc88a3e74c56c64e77107bc57bacd500 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -24,8 +24,10 @@ macro_rules! 
decl_tests { use sp_runtime::{Fixed64, traits::{SignedExtension, BadOrigin}}; use frame_support::{ assert_noop, assert_ok, assert_err, - traits::{LockableCurrency, LockIdentifier, WithdrawReason, WithdrawReasons, - Currency, ReservableCurrency, ExistenceRequirement::AllowDeath} + traits::{ + LockableCurrency, LockIdentifier, WithdrawReason, WithdrawReasons, + Currency, ReservableCurrency, ExistenceRequirement::AllowDeath, StoredMap + } }; use pallet_transaction_payment::ChargeTransactionPayment; use frame_system::RawOrigin; @@ -55,6 +57,15 @@ macro_rules! decl_tests { }); } + #[test] + fn account_should_be_reaped() { + <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_ok!(>::transfer(&1, &2, 10, AllowDeath)); + assert!(!<::AccountStore as StoredMap>>::is_explicit(&1)); + }); + } + #[test] fn partial_locking_should_work() { <$ext_builder>::default().existential_deposit(1).monied(true).build().execute_with(|| { diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index c566c9a9d009d69eb629f1d92b6801aaede7546b..3a5c2178f88cdc2fb8a7f74bc4a78a705588edcc 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -67,7 +67,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = super::AccountData; type OnNewAccount = (); - type OnReapAccount = Module; + type OnKilledAccount = (); } parameter_types! { pub const TransactionBaseFee: u64 = 0; diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index a63046e901d498eb8a1cfadb21976b6c8d4f3d37..861c1972127a0751dd4372fdec80cb8588d0e1f1 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -67,7 +67,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = super::AccountData; type OnNewAccount = (); - type OnReapAccount = Module; + type OnKilledAccount = Module; } parameter_types! 
{ pub const TransactionBaseFee: u64 = 0; diff --git a/frame/benchmark/Cargo.toml b/frame/benchmark/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..bf237d992cd64cecc2c771a1bec4af4ffc02002d --- /dev/null +++ b/frame/benchmark/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "pallet-benchmark" +version = "2.0.0-alpha.3" +authors = ["Parity Technologies "] +edition = "2018" +license = "GPL-3.0" + +[dependencies] +serde = { version = "1.0.101", optional = true } +codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.3", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.3", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.3", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.3", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.3", default-features = false, path = "../system" } +frame-benchmarking = { version = "2.0.0-alpha.3", default-features = false, path = "../benchmarking" } + +[features] +default = ["std"] +std = [ + "serde", + "codec/std", + "sp-std/std", + "sp-io/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", + "frame-benchmarking/std", +] diff --git a/frame/benchmark/src/benchmarking.rs b/frame/benchmark/src/benchmarking.rs new file mode 100644 index 0000000000000000000000000000000000000000..29f9e8ee972a5bf95dd774eaa0cc6dda316e8de1 --- /dev/null +++ b/frame/benchmark/src/benchmarking.rs @@ -0,0 +1,131 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Benchmarks for common FRAME Pallet operations. + +use super::*; + +use frame_system::RawOrigin; +use sp_std::prelude::*; +use frame_benchmarking::{benchmarks, account}; + +use crate::Module as Benchmark; + +const SEED: u32 = 0; + +benchmarks! { + _ { + let m in 1 .. 1000 => { + let origin = RawOrigin::Signed(account("member", m, SEED)); + Benchmark::::add_member_list(origin.into())? + }; + let i in 1 .. 1000 => { + MyMap::insert(i, i); + }; + let d in 1 .. 1000 => { + for i in 0..d { + for j in 0..100 { + MyDoubleMap::insert(i, j, d); + } + } + }; + } + + add_member_list { + let m in ...; + }: _(RawOrigin::Signed(account("member", m + 1, SEED))) + + append_member_list { + let m in ...; + }: _(RawOrigin::Signed(account("member", m + 1, SEED))) + + read_value { + let n in 1 .. 1000; + MyValue::put(n); + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + put_value { + let n in 1 .. 1000; + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + exists_value { + let n in 1 .. 
1000; + MyValue::put(n); + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + remove_value { + let i in ...; + }: _(RawOrigin::Signed(account("user", 0, SEED)), i) + + read_map { + let i in ...; + }: _(RawOrigin::Signed(account("user", 0, SEED)), i) + + insert_map { + let n in 1 .. 1000; + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + contains_key_map { + let i in ...; + }: _(RawOrigin::Signed(account("user", 0, SEED)), i) + + remove_prefix { + let d in ...; + }: _(RawOrigin::Signed(account("user", 0, SEED)), d) + + do_nothing { + let n in 1 .. 1000; + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + encode_accounts { + let a in 1 .. 1000; + let mut accounts = Vec::new(); + for _ in 0..a { + accounts.push(account::("encode", a, SEED)); + } + }: _(RawOrigin::Signed(account("user", 0, SEED)), accounts) + + decode_accounts { + let a in 1 .. 1000; + let mut accounts = Vec::new(); + for _ in 0..a { + accounts.push(account::("encode", a, SEED)); + } + let bytes = accounts.encode(); + }: _(RawOrigin::Signed(account("user", 0, SEED)), bytes) + + // Custom implementation to handle benchmarking of storage recalculation. + // Puts `repeat` number of items into random storage keys, and then times how + // long it takes to recalculate the storage root. + storage_root { + let z in 0 .. 10000; + }: { + for index in 0 .. z { + let random = (index).using_encoded(sp_io::hashing::blake2_256); + sp_io::storage::set(&random, &random); + } + } + + // Custom implementation to handle benchmarking of calling a host function. + // Will check how long it takes to call `current_time()`. + current_time { + let z in 0 .. 1000; + }: { + for _ in 0 .. z { + let _ = frame_benchmarking::benchmarking::current_time(); + } + } +} diff --git a/frame/benchmark/src/lib.rs b/frame/benchmark/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..ef7731eea43d4610a93213843ef04b26d13db1b8 --- /dev/null +++ b/frame/benchmark/src/lib.rs @@ -0,0 +1,177 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! A pallet that contains common runtime patterns in an isolated manner. +//! This pallet is **not** meant to be used in a production blockchain, just +//! for benchmarking and testing purposes. + +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::{decl_module, decl_storage, decl_event, decl_error}; +use frame_support::traits::Currency; +use frame_system::{self as system, ensure_signed}; +use codec::{Encode, Decode}; +use sp_std::prelude::Vec; + +pub mod benchmarking; + +/// Type alias for currency balance. +pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; + +/// The pallet's configuration trait. +pub trait Trait: system::Trait { + type Event: From> + Into<::Event>; + type Currency: Currency; +} + +// This pallet's storage items. +decl_storage! 
{ + trait Store for Module as Benchmark { + MyMemberList: Vec; + MyMemberMap: map hasher(blake2_256) T::AccountId => bool; + MyValue: u32; + MyMap: map hasher(blake2_256) u32 => u32; + MyDoubleMap: double_map hasher(blake2_256) u32, hasher(blake2_256) u32 => u32; + } +} + +// The pallet's events +decl_event!( + pub enum Event where AccountId = ::AccountId { + Dummy(u32, AccountId), + } +); + +// The pallet's errors +decl_error! { + pub enum Error for Module { + } +} + +// The pallet's dispatchable functions. +decl_module! { + /// The module declaration. + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Do nothing. + pub fn do_nothing(_origin, input: u32) { + if input > 0 { + return Ok(()); + } + } + + /// Read a value from storage value `repeat` number of times. + /// Note the first `get()` read here will pull from the underlying + /// storage database, however, the `repeat` calls will all pull from the + /// storage overlay cache. You must consider this when analyzing the + /// results of the benchmark. + pub fn read_value(_origin, repeat: u32) { + for _ in 0..repeat { + MyValue::get(); + } + } + + /// Put a value into a storage value. + pub fn put_value(_origin, repeat: u32) { + for r in 0..repeat { + MyValue::put(r); + } + } + + /// Read a value from storage `repeat` number of times. + /// Note the first `exists()` read here will pull from the underlying + /// storage database, however, the `repeat` calls will all pull from the + /// storage overlay cache. You must consider this when analyzing the + /// results of the benchmark. + pub fn exists_value(_origin, repeat: u32) { + for _ in 0..repeat { + MyValue::exists(); + } + } + + /// Remove a value from storage `repeat` number of times. + pub fn remove_value(_origin, repeat: u32) { + for r in 0..repeat { + MyMap::remove(r); + } + } + + /// Read a value from storage map `repeat` number of times. + pub fn read_map(_origin, repeat: u32) { + for r in 0..repeat { + MyMap::get(r); + } + } + + /// Insert a value into a map. + pub fn insert_map(_origin, repeat: u32) { + for r in 0..repeat { + MyMap::insert(r, r); + } + } + + /// Check is a map contains a value `repeat` number of times. + pub fn contains_key_map(_origin, repeat: u32) { + for r in 0..repeat { + MyMap::contains_key(r); + } + } + + /// Read a value from storage `repeat` number of times. + pub fn remove_prefix(_origin, repeat: u32) { + for r in 0..repeat { + MyDoubleMap::remove_prefix(r); + } + } + + // Add user to the list. + pub fn add_member_list(origin) { + let who = ensure_signed(origin)?; + MyMemberList::::mutate(|x| x.push(who)); + } + + // Append user to the list. + pub fn append_member_list(origin) { + let who = ensure_signed(origin)?; + MyMemberList::::append(&[who])?; + } + + // Encode a vector of accounts to bytes. + pub fn encode_accounts(_origin, accounts: Vec) { + let bytes = accounts.encode(); + + // In an attempt to tell the compiler not to optimize away this benchmark, we will use + // the result of encoding the accounts. + if bytes.is_empty() { + frame_support::print("You are encoding zero accounts."); + } + } + + // Decode bytes into a vector of accounts. + pub fn decode_accounts(_origin, bytes: Vec) { + let accounts: Vec = Decode::decode(&mut bytes.as_slice()).map_err(|_| "Could not decode")?; + + // In an attempt to tell the compiler not to optimize away this benchmark, we will use + // the result of decoding the bytes. 
+ if accounts.is_empty() { + frame_support::print("You are decoding zero bytes."); + } + } + } +} diff --git a/frame/benchmarking/Cargo.toml b/frame/benchmarking/Cargo.toml index c2748e080defcb1301af6c663491d10d10ed9262..b39031a6b77f4422b6ea558d0644201cdf8a3b8d 100644 --- a/frame/benchmarking/Cargo.toml +++ b/frame/benchmarking/Cargo.toml @@ -1,16 +1,31 @@ [package] name = "frame-benchmarking" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Macro for benchmarking a FRAME runtime." [dependencies] -codec = { package = "parity-scale-codec", version = "1.1.2", default-features = false } -sp-api = { version = "2.0.0", path = "../../primitives/api", default-features = false } -sp-runtime-interface = { version = "2.0.0", path = "../../primitives/runtime-interface", default-features = false } -sp-std = { version = "2.0.0", path = "../../primitives/std", default-features = false } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-api = { version = "2.0.0-alpha.2", path = "../../primitives/api", default-features = false } +sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../../primitives/runtime-interface", default-features = false } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../primitives/runtime", default-features = false } +sp-std = { version = "2.0.0-alpha.2", path = "../../primitives/std", default-features = false } +sp-io = { path = "../../primitives/io", default-features = false, version = "2.0.0-alpha.2" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [features] default = [ "std" ] -std = [ "sp-runtime-interface/std", "sp-api/std", "codec/std", "sp-std/std" ] +std = [ + "codec/std", + "sp-runtime-interface/std", + "sp-runtime/std", + "sp-api/std", + "sp-std/std", + "frame-support/std", + "frame-system/std", +] diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index c57cfb4914050f6f1b86d4c0b990877f37ca03a4..a18048d3053370ca4f89461d83436e00d1a1af74 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -14,79 +14,340 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Interfaces and types for benchmarking a FRAME runtime. +//! Macro for benchmarking a FRAME runtime. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::vec::Vec; +mod tests; +mod utils; +pub use utils::*; +#[doc(hidden)] +pub use sp_io::storage::root as storage_root; +pub use sp_runtime::traits::Dispatchable; -/// An alphabet of possible parameters to use for benchmarking. -#[derive(codec::Encode, codec::Decode, Clone, Copy, PartialEq, Debug)] -#[allow(missing_docs)] -pub enum BenchmarkParameter { - A, B, C, D, E, F, G, H, I, J, K, L, M, N, O, P, Q, R, S, T, U, V, W, X, Y, Z, -} - -/// Results from running benchmarks on a FRAME pallet. -/// Contains duration of the function call in nanoseconds along with the benchmark parameters -/// used for that benchmark result. -pub type BenchmarkResults = (Vec<(BenchmarkParameter, u32)>, u128); - -sp_api::decl_runtime_apis! { - /// Runtime api for benchmarking a FRAME runtime. - pub trait Benchmark { - /// Dispatch the given benchmark. 
- fn dispatch_benchmark( - module: Vec, - extrinsic: Vec, - steps: u32, - repeat: u32, - ) -> Option>; +/// Construct pallet benchmarks for weighing dispatchables. +/// +/// Works around the idea of complexity parameters, named by a single letter (which is usually +/// upper cased in complexity notation but is lower-cased for use in this macro). +/// +/// Complexity parameters ("parameters") have a range which is a `u32` pair. Every time a benchmark +/// is prepared and run, this parameter takes a concrete value within the range. There is an +/// associated instancing block, which is a single expression that is evaluated during +/// preparation. It may use `?` (`i.e. `return Err(...)`) to bail with a string error. Here's a +/// few examples: +/// +/// ```ignore +/// // These two are equivalent: +/// let x in 0 .. 10; +/// let x in 0 .. 10 => (); +/// // This one calls a setup function and might return an error (which would be terminal). +/// let y in 0 .. 10 => setup(y)?; +/// // This one uses a code block to do lots of stuff: +/// let z in 0 .. 10 => { +/// let a = z * z / 5; +/// let b = do_something(a)?; +/// combine_into(z, b); +/// } +/// ``` +/// +/// Note that due to parsing restrictions, if the `from` expression is not a single token (i.e. a +/// literal or constant), then it must be parenthesised. +/// +/// The macro allows for a number of "arms", each representing an individual benchmark. Using the +/// simple syntax, the associated dispatchable function maps 1:1 with the benchmark and the name of +/// the benchmark is the same as that of the associated function. However, extended syntax allows +/// for arbitrary expresions to be evaluated in a benchmark (including for example, +/// `on_initialize`). +/// +/// The macro allows for common parameters whose ranges and instancing expressions may be drawn upon +/// (or not) by each arm. Syntax is available to allow for only the range to be drawn upon if +/// desired, allowing an alternative instancing expression to be given. +/// +/// Each arm may also have a block of code which is run prior to any instancing and a block of code +/// which is run afterwards. All code blocks may draw upon the specific value of each parameter +/// at any time. Local variables are shared between the two pre- and post- code blocks, but do not +/// leak from the interior of any instancing expressions. +/// +/// Any common parameters that are unused in an arm do not have their instancing expressions +/// evaluated. +/// +/// Example: +/// ```ignore +/// benchmarks! { +/// // common parameter; just one for this example. +/// _ { +/// let l in 1 .. MAX_LENGTH => initialize_l(l); +/// } +/// +/// // first dispatchable: foo; this is a user dispatchable and operates on a `u8` vector of +/// // size `l`, which we allow to be initialized as usual. +/// foo { +/// let caller = account::(b"caller", 0, benchmarks_seed); +/// let l = ...; +/// }: _(Origin::Signed(caller), vec![0u8; l]) +/// +/// // second dispatchable: bar; this is a root dispatchable and accepts a `u8` vector of size +/// // `l`. We don't want it pre-initialized like before so we override using the `=> ()` notation. +/// // In this case, we explicitly name the call using `bar` instead of `_`. +/// bar { +/// let l = _ .. _ => (); +/// }: bar(Origin::Root, vec![0u8; l]) +/// +/// // third dispatchable: baz; this is a user dispatchable. It isn't dependent on length like the +/// // other two but has its own complexity `c` that needs setting up. 
It uses `caller` (in the +/// // pre-instancing block) within the code block. This is only allowed in the param instancers +/// // of arms. Instancers of common params cannot optimistically draw upon hypothetical variables +/// // that the arm's pre-instancing code block might have declared. +/// baz1 { +/// let caller = account::(b"caller", 0, benchmarks_seed); +/// let c = 0 .. 10 => setup_c(&caller, c); +/// }: baz(Origin::Signed(caller)) +/// +/// // this is a second benchmark of the baz dispatchable with a different setup. +/// baz2 { +/// let caller = account::(b"caller", 0, benchmarks_seed); +/// let c = 0 .. 10 => setup_c_in_some_other_way(&caller, c); +/// }: baz(Origin::Signed(caller)) +/// +/// // this is benchmarking some code that is not a dispatchable. +/// populate_a_set { +/// let x in 0 .. 10_000; +/// let mut m = Vec::::new(); +/// for i in 0..x { +/// m.insert(i); +/// } +/// }: { m.into_iter().collect::() } +/// } +/// ``` +#[macro_export] +macro_rules! benchmarks { + ( + _ { + $( + let $common:ident in $common_from:tt .. $common_to:expr => $common_instancer:expr; + )* + } + $( $rest:tt )* + ) => { + $crate::benchmarks_iter!({ + $( { $common , $common_from , $common_to , $common_instancer } )* + } ( ) $( $rest )* ); } } -/// Interface that provides functions for benchmarking the runtime. -#[sp_runtime_interface::runtime_interface] -pub trait Benchmarking { - /// Get the number of nanoseconds passed since the UNIX epoch - /// - /// WARNING! This is a non-deterministic call. Do not use this within - /// consensus critical logic. - fn current_time() -> u128 { - std::time::SystemTime::now().duration_since(std::time::SystemTime::UNIX_EPOCH) - .expect("Unix time doesn't go backwards; qed") - .as_nanos() - } - - /// Reset the trie database to the genesis state. - fn wipe_db(&mut self) { - self.wipe() - } - - /// Commit pending storage changes to the trie database and clear the database cache. - fn commit_db(&mut self) { - self.commit() +#[macro_export] +#[allow(missing_docs)] +macro_rules! benchmarks_iter { + // mutation arm: + ( + { $( $common:tt )* } + ( $( $names:ident )* ) + $name:ident { $( $code:tt )* }: _ ( $origin:expr $( , $arg:expr )* ) + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $( $common )* } ( $( $names )* ) $name { $( $code )* }: $name ( $origin $( , $arg )* ) $( $rest )* + } + }; + // mutation arm: + ( + { $( $common:tt )* } + ( $( $names:ident )* ) + $name:ident { $( $code:tt )* }: $dispatch:ident ( $origin:expr $( , $arg:expr )* ) + $( $rest:tt )* + ) => { + $crate::benchmarks_iter! { + { $( $common )* } ( $( $names )* ) $name { $( $code )* }: { + as $crate::Dispatchable>::dispatch(Call::::$dispatch($($arg),*), $origin.into())?; + } $( $rest )* + } + }; + // iteration arm: + ( + { $( $common:tt )* } + ( $( $names:ident )* ) + $name:ident { $( $code:tt )* }: $eval:block + $( $rest:tt )* + ) => { + $crate::benchmark_backend! { + $name { $( $common )* } { } { $eval } { $( $code )* } + } + $crate::benchmarks_iter!( { $( $common )* } ( $( $names )* $name ) $( $rest )* ); + }; + // iteration-exit arm + ( { $( $common:tt )* } ( $( $names:ident )* ) ) => { + $crate::selected_benchmark!( $( $names ),* ); + $crate::impl_benchmark!( $( $names ),* ); } } -/// The pallet benchmarking trait. -pub trait Benchmarking { - /// Run the benchmarks for this pallet. - /// - /// Parameters - /// - `extrinsic`: The name of extrinsic function you want to benchmark encoded as bytes. 
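For orientation, here is a minimal invocation of the `benchmarks!` macro in the syntax documented above. It is modeled directly on the tests this PR adds in frame/benchmarking/src/tests.rs, so the `dummy` dispatchable and the mock runtime are assumed to come from that test module:

benchmarks! {
	// Common parameter shared by every arm; `=> ()` means no extra setup is needed.
	_ {
		let b in 1 .. 1000 => ();
	}

	// Benchmark the `dummy` dispatchable; `_` reuses the arm name as the call name.
	dummy {
		let b in ...;
		let caller = account("caller", 0, 0);
	}: _ (RawOrigin::Signed(caller), b.into())

	// Benchmark arbitrary code that is not a dispatchable.
	sort_vector {
		let x in 0 .. 10000;
		let mut m = Vec::<u32>::new();
		for i in 0..x {
			m.push(i);
		}
	}: { m.sort(); }
}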
- /// - `steps`: The number of sample points you want to take across the range of parameters. - /// - `repeat`: The number of times you want to repeat a benchmark. - fn run_benchmark(extrinsic: Vec, steps: u32, repeat: u32) -> Result, &'static str>; -} +#[macro_export] +#[allow(missing_docs)] +macro_rules! benchmark_backend { + // parsing arms + ($name:ident { + $( $common:tt )* + } { + $( PRE { $( $pre_parsed:tt )* } )* + } { $eval:block } { + let $pre_id:tt : $pre_ty:ty = $pre_ex:expr; + $( $rest:tt )* + } ) => { + $crate::benchmark_backend! { + $name { $( $common )* } { + $( PRE { $( $pre_parsed )* } )* + PRE { $pre_id , $pre_ty , $pre_ex } + } { $eval } { $( $rest )* } + } + }; + ($name:ident { + $( $common:tt )* + } { + $( $parsed:tt )* + } { $eval:block } { + let $param:ident in ( $param_from:expr ) .. $param_to:expr => $param_instancer:expr; + $( $rest:tt )* + }) => { + $crate::benchmark_backend! { + $name { $( $common )* } { + $( $parsed )* + PARAM { $param , $param_from , $param_to , $param_instancer } + } { $eval } { $( $rest )* } + } + }; + // mutation arm to look after defaulting to a common param + ($name:ident { + $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* + } { + $( $parsed:tt )* + } { $eval:block } { + let $param:ident in ...; + $( $rest:tt )* + }) => { + $crate::benchmark_backend! { + $name { + $( { $common , $common_from , $common_to , $common_instancer } )* + } { + $( $parsed )* + } { $eval } { + let $param + in ({ $( let $common = $common_from; )* $param }) + .. ({ $( let $common = $common_to; )* $param }) + => ({ $( let $common = || -> Result<(), &'static str> { $common_instancer ; Ok(()) }; )* $param()? }); + $( $rest )* + } + } + }; + // mutation arm to look after defaulting only the range to common param + ($name:ident { + $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* + } { + $( $parsed:tt )* + } { $eval:block } { + let $param:ident in _ .. _ => $param_instancer:expr ; + $( $rest:tt )* + }) => { + $crate::benchmark_backend! { + $name { + $( { $common , $common_from , $common_to , $common_instancer } )* + } { + $( $parsed )* + } { $eval } { + let $param + in ({ $( let $common = $common_from; )* $param }) + .. ({ $( let $common = $common_to; )* $param }) + => $param_instancer ; + $( $rest )* + } + } + }; + // mutation arm to look after a single tt for param_from. + ($name:ident { + $( $common:tt )* + } { + $( $parsed:tt )* + } { $eval:block } { + let $param:ident in $param_from:tt .. $param_to:expr => $param_instancer:expr ; + $( $rest:tt )* + }) => { + $crate::benchmark_backend! { + $name { $( $common )* } { $( $parsed )* } { $eval } { + let $param in ( $param_from ) .. $param_to => $param_instancer; + $( $rest )* + } + } + }; + // mutation arm to look after the default tail of `=> ()` + ($name:ident { + $( $common:tt )* + } { + $( $parsed:tt )* + } { $eval:block } { + let $param:ident in $param_from:tt .. $param_to:expr; + $( $rest:tt )* + }) => { + $crate::benchmark_backend! { + $name { $( $common )* } { $( $parsed )* } { $eval } { + let $param in $param_from .. $param_to => (); + $( $rest )* + } + } + }; + // mutation arm to look after `let _ =` + ($name:ident { + $( $common:tt )* + } { + $( $parsed:tt )* + } { $eval:block } { + let $pre_id:tt = $pre_ex:expr; + $( $rest:tt )* + }) => { + $crate::benchmark_backend! 
{ + $name { $( $common )* } { $( $parsed )* } { $eval } { + let $pre_id : _ = $pre_ex; + $( $rest )* + } + } + }; + // actioning arm + ($name:ident { + $( { $common:ident , $common_from:tt , $common_to:expr , $common_instancer:expr } )* + } { + $( PRE { $pre_id:tt , $pre_ty:ty , $pre_ex:expr } )* + $( PARAM { $param:ident , $param_from:expr , $param_to:expr , $param_instancer:expr } )* + } { $eval:block } { $( $post:tt )* } ) => { + #[allow(non_camel_case_types)] + struct $name; + #[allow(unused_variables)] + impl $crate::BenchmarkingSetup for $name { + fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { + vec! [ + $( + ($crate::BenchmarkParameter::$param, $param_from, $param_to) + ),* + ] + } -/// The required setup for creating a benchmark. -pub trait BenchmarkingSetup { - /// Return the components and their ranges which should be tested in this benchmark. - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; + fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) + -> Result Result<(), &'static str>>, &'static str> + { + $( + let $common = $common_from; + )* + $( + // Prepare instance + let $param = components.iter().find(|&c| c.0 == $crate::BenchmarkParameter::$param).unwrap().1; + )* + $( + let $pre_id : $pre_ty = $pre_ex; + )* + $( $param_instancer ; )* + $( $post )* - /// Set up the storage, and prepare a call and caller to test in a single run of the benchmark. - fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result<(Call, RawOrigin), &'static str>; + Ok(Box::new(move || -> Result<(), &'static str> { $eval; Ok(()) })) + } + } + } } /// Creates a `SelectedBenchmark` enum implementing `BenchmarkingSetup`. @@ -109,33 +370,122 @@ macro_rules! selected_benchmark { $( $bench:ident ),* ) => { // The list of available benchmarks for this pallet. + #[allow(non_camel_case_types)] enum SelectedBenchmark { $( $bench, )* } // Allow us to select a benchmark from the list of available benchmarks. - impl $crate::BenchmarkingSetup, RawOrigin> for SelectedBenchmark { + impl $crate::BenchmarkingSetup for SelectedBenchmark { fn components(&self) -> Vec<($crate::BenchmarkParameter, u32, u32)> { match self { - $( Self::$bench => <$bench as $crate::BenchmarkingSetup< - T, - Call, - RawOrigin, - >>::components(&$bench), )* + $( Self::$bench => <$bench as $crate::BenchmarkingSetup>::components(&$bench), )* } } fn instance(&self, components: &[($crate::BenchmarkParameter, u32)]) - -> Result<(Call, RawOrigin), &'static str> + -> Result Result<(), &'static str>>, &'static str> { match self { - $( Self::$bench => <$bench as $crate::BenchmarkingSetup< - T, - Call, - RawOrigin, - >>::instance(&$bench, components), )* + $( Self::$bench => <$bench as $crate::BenchmarkingSetup>::instance(&$bench, components), )* } } } }; } + +#[macro_export] +macro_rules! impl_benchmark { + ( + $( $name:ident ),* + ) => { + impl $crate::Benchmarking<$crate::BenchmarkResults> for Module { + fn run_benchmark( + extrinsic: Vec, + lowest_range_values: Vec, + highest_range_values: Vec, + steps: Vec, + repeat: u32, + ) -> Result, &'static str> { + // Map the input to the selected benchmark. 
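The `run_benchmark` body generated below samples every component over a grid of points. As a standalone sketch of just that arithmetic (an illustration under the defaults used here, not the macro's generated code):

// Sketch of the sampling grid for one component ranging over `low..=high` with `steps` points.
fn sample_points(low: u32, high: u32, steps: u32) -> Vec<u32> {
	let diff = high - low;
	// At least 1, so a small range still yields distinct points.
	let step_size = (diff / steps).max(1);
	let num_of_steps = diff / step_size + 1;
	(0..num_of_steps).map(|s| low + step_size * s).collect()
}

For example, `sample_points(1, 1000, 10)` yields 1, 100, 199, ... up to 991 (eleven points); while one component is stepped this way, every other component is pinned at its highest configured value, and each point is repeated `repeat` times.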
+ let extrinsic = sp_std::str::from_utf8(extrinsic.as_slice()) + .map_err(|_| "`extrinsic` is not a valid utf8 string!")?; + let selected_benchmark = match extrinsic { + $( stringify!($name) => SelectedBenchmark::$name, )* + _ => return Err("Could not find extrinsic."), + }; + + // Warm up the DB + $crate::benchmarking::commit_db(); + $crate::benchmarking::wipe_db(); + + let components = >::components(&selected_benchmark); + let mut results: Vec<$crate::BenchmarkResults> = Vec::new(); + + // Default number of steps for a component. + let mut prev_steps = 10; + + // Select the component we will be benchmarking. Each component will be benchmarked. + for (idx, (name, low, high)) in components.iter().enumerate() { + // Get the number of steps for this component. + let steps = steps.get(idx).cloned().unwrap_or(prev_steps); + prev_steps = steps; + + let lowest = lowest_range_values.get(idx).cloned().unwrap_or(*low); + let highest = highest_range_values.get(idx).cloned().unwrap_or(*high); + + let diff = highest - lowest; + + // Create up to `STEPS` steps for that component between high and low. + let step_size = (diff / steps).max(1); + let num_of_steps = diff / step_size + 1; + + for s in 0..num_of_steps { + // This is the value we will be testing for component `name` + let component_value = lowest + step_size * s; + + // Select the max value for all the other components. + let c: Vec<($crate::BenchmarkParameter, u32)> = components.iter() + .enumerate() + .map(|(idx, (n, _, h))| + if n == name { + (*n, component_value) + } else { + (*n, *highest_range_values.get(idx).unwrap_or(h)) + } + ) + .collect(); + + // Run the benchmark `repeat` times. + for _ in 0..repeat { + // Set up the externalities environment for the setup we want to benchmark. + let closure_to_benchmark = >::instance(&selected_benchmark, &c)?; + + // Commit the externalities to the database, flushing the DB cache. + // This will enable worst case scenario for reading from the database. + $crate::benchmarking::commit_db(); + + // Time the extrinsic logic. + let start_extrinsic = $crate::benchmarking::current_time(); + closure_to_benchmark()?; + let finish_extrinsic = $crate::benchmarking::current_time(); + let elapsed_extrinsic = finish_extrinsic - start_extrinsic; + + // Time the storage root recalculation. + let start_storage_root = $crate::benchmarking::current_time(); + $crate::storage_root(); + let finish_storage_root = $crate::benchmarking::current_time(); + let elapsed_storage_root = finish_storage_root - start_storage_root; + + results.push((c.clone(), elapsed_extrinsic, elapsed_storage_root)); + + // Wipe the DB back to the genesis state. + $crate::benchmarking::wipe_db(); + } + } + } + return Ok(results); + } + } + } +} diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..4327476c4a6c7c59dae5019935ab06879d61e353 --- /dev/null +++ b/frame/benchmarking/src/tests.rs @@ -0,0 +1,166 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Tests for the module. + +#![cfg(test)] + +use super::*; +use codec::Decode; +use sp_std::prelude::*; +use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::{H256, Header}}; +use frame_support::{dispatch::DispatchResult, decl_module, impl_outer_origin}; +use frame_system::{RawOrigin, ensure_signed, ensure_none}; + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { + fn dummy(origin, _n: u32) -> DispatchResult { + let _sender = ensure_signed(origin)?; + Ok(()) + } + + fn other_dummy(origin, _n: u32) -> DispatchResult { + let _sender = ensure_none(origin)?; + Ok(()) + } + } +} + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +pub trait Trait { + type Event; + type BlockNumber; + type AccountId: 'static + Default + Decode; + type Origin: From> + Into, Self::Origin>>; +} + +#[derive(Clone, Eq, PartialEq)] +pub struct Test; + +impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = (); + type MaximumBlockWeight = (); + type MaximumBlockLength = (); + type AvailableBlockRatio = (); + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); +} + +impl Trait for Test { + type Event = (); + type BlockNumber = u32; + type Origin = Origin; + type AccountId = u64; +} + +// This function basically just builds a genesis storage key/value store according to +// our desired mockup. +fn new_test_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::default().build_storage::().unwrap().into() +} + +benchmarks!{ + _ { + // Define a common range for `b`. + let b in 1 .. 1000 => (); + } + + dummy { + let b in ...; + let caller = account("caller", 0, 0); + }: _ (RawOrigin::Signed(caller), b.into()) + + other_name { + let b in ...; + let caller = account("caller", 0, 0); + }: other_dummy (RawOrigin::Signed(caller), b.into()) + + sort_vector { + let x in 0 .. 10000; + let mut m = Vec::::new(); + for i in 0..x { + m.push(i); + } + }: { + m.sort(); + } +} + +#[test] +fn benchmarks_macro_works() { + // Check benchmark creation for `dummy`. + let selected_benchmark = SelectedBenchmark::dummy; + + let components = >::components(&selected_benchmark); + assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + + let closure = >::instance( + &selected_benchmark, + &[(BenchmarkParameter::b, 1)], + ).expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_eq!(closure(), Ok(())); + }); +} + +#[test] +fn benchmarks_macro_rename_works() { + // Check benchmark creation for `other_dummy`. 
+ let selected_benchmark = SelectedBenchmark::other_name; + let components = >::components(&selected_benchmark); + assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + + let closure = >::instance( + &selected_benchmark, + &[(BenchmarkParameter::b, 1)], + ).expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_eq!(closure(), Err("Bad origin")); + }); +} + +#[test] +fn benchmarks_macro_works_for_non_dispatchable() { + let selected_benchmark = SelectedBenchmark::sort_vector; + + let components = >::components(&selected_benchmark); + assert_eq!(components, vec![(BenchmarkParameter::x, 0, 10000)]); + + let closure = >::instance( + &selected_benchmark, + &[(BenchmarkParameter::x, 1)], + ).expect("failed to create closure"); + + assert_eq!(closure(), Ok(())); +} diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs new file mode 100644 index 0000000000000000000000000000000000000000..bc6cfbcc86ea1e221cdceb7728fd89b2e7bb2bd9 --- /dev/null +++ b/frame/benchmarking/src/utils.rs @@ -0,0 +1,108 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Interfaces, types and utils for benchmarking a FRAME runtime. + +use codec::{Encode, Decode}; +use sp_std::{vec::Vec, prelude::Box}; +use sp_io::hashing::blake2_256; +use sp_runtime::RuntimeString; + +/// An alphabet of possible parameters to use for benchmarking. +#[derive(codec::Encode, codec::Decode, Clone, Copy, PartialEq, Debug)] +#[allow(missing_docs)] +#[allow(non_camel_case_types)] +pub enum BenchmarkParameter { + a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, +} + +/// Results from running benchmarks on a FRAME pallet. +/// Contains duration of the function call in nanoseconds along with the benchmark parameters +/// used for that benchmark result. +pub type BenchmarkResults = (Vec<(BenchmarkParameter, u32)>, u128, u128); + +sp_api::decl_runtime_apis! { + /// Runtime api for benchmarking a FRAME runtime. + pub trait Benchmark { + /// Dispatch the given benchmark. + fn dispatch_benchmark( + module: Vec, + extrinsic: Vec, + lowest_range_values: Vec, + highest_range_values: Vec, + steps: Vec, + repeat: u32, + ) -> Result, RuntimeString>; + } +} + +/// Interface that provides functions for benchmarking the runtime. +#[sp_runtime_interface::runtime_interface] +pub trait Benchmarking { + /// Get the number of nanoseconds passed since the UNIX epoch + /// + /// WARNING! This is a non-deterministic call. Do not use this within + /// consensus critical logic. + fn current_time() -> u128 { + std::time::SystemTime::now().duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("Unix time doesn't go backwards; qed") + .as_nanos() + } + + /// Reset the trie database to the genesis state. + fn wipe_db(&mut self) { + self.wipe() + } + + /// Commit pending storage changes to the trie database and clear the database cache. 
+ fn commit_db(&mut self) { + self.commit() + } +} + +/// The pallet benchmarking trait. +pub trait Benchmarking { + /// Run the benchmarks for this pallet. + /// + /// Parameters + /// - `extrinsic`: The name of extrinsic function you want to benchmark encoded as bytes. + /// - `steps`: The number of sample points you want to take across the range of parameters. + /// - `lowest_range_values`: The lowest number for each range of parameters. + /// - `highest_range_values`: The highest number for each range of parameters. + /// - `repeat`: The number of times you want to repeat a benchmark. + fn run_benchmark( + extrinsic: Vec, + lowest_range_values: Vec, + highest_range_values: Vec, + steps: Vec, + repeat: u32, + ) -> Result, &'static str>; +} + +/// The required setup for creating a benchmark. +pub trait BenchmarkingSetup { + /// Return the components and their ranges which should be tested in this benchmark. + fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; + + /// Set up the storage, and prepare a closure to test in a single run of the benchmark. + fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; +} + +/// Grab an account, seeded by a name and index. +pub fn account(name: &'static str, index: u32, seed: u32) -> AccountId { + let entropy = (name, index, seed).using_encoded(blake2_256); + AccountId::decode(&mut &entropy[..]).unwrap_or_default() +} diff --git a/frame/collective/Cargo.toml b/frame/collective/Cargo.toml index 8f7ffa76534908320aeed2761ef09b3d9d5a789c..60899feb4fbb96aa7eb44d637c5cae43bd52612a 100644 --- a/frame/collective/Cargo.toml +++ b/frame/collective/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-collective" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Collective system: Members of a set of account IDs can make their collective feelings known through dispatched calls from one of two specialized origins." 
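The `account` helper introduced above in `frame/benchmarking/src/utils.rs` derives benchmark accounts deterministically from a name, an index, and a seed, so repeated benchmark runs exercise the same accounts. Below is a minimal, std-only sketch of that idea; it is illustrative only, assuming a plain `u64` account id and std's `DefaultHasher` in place of the SCALE encoding plus `blake2_256` used by the real helper.

```rust
// Simplified stand-in for `account(name, index, seed)`: hash the tuple and use the
// digest as the account id. Hypothetical types; not the Substrate API.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

type AccountId = u64; // assumption: a plain integer account id, for illustration only

fn demo_account(name: &'static str, index: u32, seed: u32) -> AccountId {
    let mut hasher = DefaultHasher::new();
    (name, index, seed).hash(&mut hasher);
    hasher.finish()
}

fn main() {
    // Deterministic: the same (name, index, seed) always maps to the same account.
    assert_eq!(demo_account("caller", 0, 0), demo_account("caller", 0, 0));
    // Different indices or seeds give different accounts (with overwhelming probability).
    assert_ne!(demo_account("caller", 0, 0), demo_account("caller", 1, 0));
}
```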
[dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } [features] default = ["std"] diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index a3ea901a6fd248c4c775e024525d286e913ecdaa..b5620e34065f64415e93de725af1aa6cd372352c 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -18,7 +18,20 @@ //! through dispatched calls from one of two specialized origins. //! //! The membership can be provided in one of two ways: either directly, using the Root-dispatchable -//! function `set_members`, or indirectly, through implementing the `ChangeMembers` +//! function `set_members`, or indirectly, through implementing the `ChangeMembers`. +//! +//! A "prime" member may be set allowing their vote to act as the default vote in case of any +//! abstentions after the voting period. +//! +//! Voting happens through motions comprising a proposal (i.e. a curried dispatchable) plus a +//! number of approvals required for it to pass and be called. Motions are open for members to +//! vote on for a minimum period given by `MotionDuration`. As soon as the needed number of +//! approvals is given, the motion is closed and executed. If the number of approvals is not reached +//! during the voting period, then `close` may be called by any account in order to force the end +//! the motion explicitly. If a prime member is defined then their vote is used in place of any +//! abstentions and the proposal is executed if there are enough approvals counting the new votes. +//! +//! If there are not, or if no prime is set, then the motion is dropped without being executed. 
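The tallying rule described above (once `close` is called after the `MotionDuration` has elapsed, abstentions follow a prime member's aye vote if there is one, and otherwise count as rejections) can be summarised in a few lines. The sketch below is a self-contained, plain-Rust restatement of that rule, not the pallet's code; `close_outcome` and its parameters are illustrative names.

```rust
// Count abstentions toward the prime member's aye vote if there is one; otherwise
// abstentions count as nays. Returns the final (ayes, nays, approved) tally, mirroring
// the `Closed(hash, yes_votes, no_votes)` event followed by approval or disapproval.
fn close_outcome(
    seats: u32,
    ayes: u32,
    nays: u32,
    threshold: u32,
    prime_voted_aye: bool, // true only if a prime member is set *and* voted aye
) -> (u32, u32, bool) {
    let abstentions = seats - (ayes + nays);
    let (ayes, nays) = if prime_voted_aye {
        (ayes + abstentions, nays)
    } else {
        (ayes, nays + abstentions)
    };
    (ayes, nays, ayes >= threshold)
}

fn main() {
    // Three seats, two ayes, threshold three, no prime aye: the abstention counts as a
    // nay and the motion is disapproved.
    assert_eq!(close_outcome(3, 2, 0, 3, false), (2, 1, false));
    // With a prime member who voted aye, the abstention follows the prime and the motion passes.
    assert_eq!(close_outcome(3, 2, 0, 3, true), (3, 0, true));
}
```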
#![cfg_attr(not(feature = "std"), no_std)] #![recursion_limit="128"] @@ -30,7 +43,7 @@ use sp_runtime::traits::{Hash, EnsureOrigin}; use frame_support::weights::SimpleDispatchInfo; use frame_support::{ dispatch::{Dispatchable, Parameter}, codec::{Encode, Decode}, - traits::{ChangeMembers, InitializeMembers}, decl_module, decl_event, + traits::{Get, ChangeMembers, InitializeMembers}, decl_module, decl_event, decl_storage, decl_error, ensure, }; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -53,6 +66,9 @@ pub trait Trait: frame_system::Trait { /// The outer event type. type Event: From> + Into<::Event>; + + /// The time-out for council motions. + type MotionDuration: Get; } /// Origin for the collective module. @@ -71,7 +87,7 @@ pub type Origin = RawOrigin<::Ac #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] /// Info for keeping track of a motion being voted on. -pub struct Votes { +pub struct Votes { /// The proposal's unique index. index: ProposalIndex, /// The number of approval votes that are needed to pass the motion. @@ -80,6 +96,8 @@ pub struct Votes { ayes: Vec, /// The current set of voters that rejected it. nays: Vec, + /// The hard end time of this vote. + end: BlockNumber, } decl_storage! { @@ -91,11 +109,14 @@ decl_storage! { map hasher(blake2_256) T::Hash => Option<>::Proposal>; /// Votes on a given proposal, if it is ongoing. pub Voting get(fn voting): - map hasher(blake2_256) T::Hash => Option>; + map hasher(blake2_256) T::Hash => Option>; /// Proposals so far. pub ProposalCount get(fn proposal_count): u32; /// The current members of the collective. This is stored sorted (just by value). pub Members get(fn members): Vec; + /// The member who provides the default vote for any other members that do not vote before + /// the timeout. If None, then no member has that privilege. + pub Prime get(fn prime): Option; } add_extra_genesis { config(phantom): sp_std::marker::PhantomData; @@ -123,6 +144,8 @@ decl_event! { Executed(Hash, bool), /// A single member did some action; `bool` is true if returned without error. MemberExecuted(Hash, bool), + /// A proposal was closed after its duration was up. + Closed(Hash, MemberCount, MemberCount), } } @@ -140,6 +163,8 @@ decl_error! { DuplicateVote, /// Members are already initialized! AlreadyInitialized, + /// The close call is made too early, before the end of the voting. + TooEarly, } } @@ -152,19 +177,21 @@ decl_module! { fn deposit_event() = default; - /// Set the collective's membership manually to `new_members`. Be nice to the chain and - /// provide it pre-sorted. + /// Set the collective's membership. + /// + /// - `new_members`: The new member list. Be nice to the chain and + // provide it sorted. + /// - `prime`: The prime member whose vote sets the default. /// /// Requires root origin. #[weight = SimpleDispatchInfo::FixedOperational(100_000)] - fn set_members(origin, new_members: Vec) { + fn set_members(origin, new_members: Vec, prime: Option) { ensure_root(origin)?; let mut new_members = new_members; new_members.sort(); - >::mutate(|m| { - >::set_members_sorted(&new_members[..], m); - *m = new_members; - }); + let old = Members::::get(); + >::set_members_sorted(&new_members[..], &old); + Prime::::set(prime); } /// Dispatch a proposal from a member using the `Member` origin. @@ -202,7 +229,8 @@ decl_module! 
{ >::mutate(|i| *i += 1); >::mutate(|proposals| proposals.push(proposal_hash)); >::insert(proposal_hash, *proposal); - let votes = Votes { index, threshold, ayes: vec![who.clone()], nays: vec![] }; + let end = system::Module::::block_number() + T::MotionDuration::get(); + let votes = Votes { index, threshold, ayes: vec![who.clone()], nays: vec![], end }; >::insert(proposal_hash, votes); Self::deposit_event(RawEvent::Proposed(who, index, proposal_hash, threshold)); @@ -249,32 +277,55 @@ decl_module! { Self::deposit_event(RawEvent::Voted(who, proposal, approve, yes_votes, no_votes)); let seats = Self::members().len() as MemberCount; + let approved = yes_votes >= voting.threshold; let disapproved = seats.saturating_sub(no_votes) < voting.threshold; if approved || disapproved { - if approved { - Self::deposit_event(RawEvent::Approved(proposal)); - - // execute motion, assuming it exists. - if let Some(p) = >::take(&proposal) { - let origin = RawOrigin::Members(voting.threshold, seats).into(); - let ok = p.dispatch(origin).is_ok(); - Self::deposit_event(RawEvent::Executed(proposal, ok)); - } - } else { - // disapproved - >::remove(&proposal); - Self::deposit_event(RawEvent::Disapproved(proposal)); - } - - // remove vote - >::remove(&proposal); - >::mutate(|proposals| proposals.retain(|h| h != &proposal)); + Self::finalize_proposal(approved, seats, voting, proposal); } else { - // update voting - >::insert(&proposal, voting); + Voting::::insert(&proposal, voting); } } + + /// May be called by any signed account after the voting duration has ended in order to + /// finish voting and close the proposal. + /// + /// Abstentions are counted as rejections unless there is a prime member set and the prime + /// member cast an approval. + /// + /// - the weight of `proposal` preimage. + /// - up to three events deposited. + /// - one read, two removals, one mutation. (plus three static reads.) + /// - computation and i/o `O(P + L + M)` where: + /// - `M` is number of members, + /// - `P` is number of active proposals, + /// - `L` is the encoded length of `proposal` preimage. + #[weight = SimpleDispatchInfo::FixedOperational(200_000)] + fn close(origin, proposal: T::Hash, #[compact] index: ProposalIndex) { + let _ = ensure_signed(origin)?; + + let voting = Self::voting(&proposal).ok_or(Error::::ProposalMissing)?; + ensure!(voting.index == index, Error::::WrongIndex); + ensure!(system::Module::::block_number() >= voting.end, Error::::TooEarly); + + // default to true only if there's a prime and they voted in favour. + let default = Self::prime().map_or( + false, + |who| voting.ayes.iter().any(|a| a == &who), + ); + + let mut no_votes = voting.nays.len() as MemberCount; + let mut yes_votes = voting.ayes.len() as MemberCount; + let seats = Self::members().len() as MemberCount; + let abstentions = seats - (yes_votes + no_votes); + match default { + true => yes_votes += abstentions, + false => no_votes += abstentions, + } + + Self::deposit_event(RawEvent::Closed(proposal, yes_votes, no_votes)); + Self::finalize_proposal(yes_votes >= voting.threshold, seats, voting, proposal); + } } } @@ -282,10 +333,54 @@ impl, I: Instance> Module { pub fn is_member(who: &T::AccountId) -> bool { Self::members().contains(who) } + + /// Weight: + /// If `approved`: + /// - the weight of `proposal` preimage. + /// - two events deposited. + /// - two removals, one mutation. + /// - computation and i/o `O(P + L)` where: + /// - `P` is number of active proposals, + /// - `L` is the encoded length of `proposal` preimage. 
+ /// + /// If not `approved`: + /// - one event deposited. + /// Two removals, one mutation. + /// Computation and i/o `O(P)` where: + /// - `P` is number of active proposals + fn finalize_proposal( + approved: bool, + seats: MemberCount, + voting: Votes, + proposal: T::Hash, + ) { + if approved { + Self::deposit_event(RawEvent::Approved(proposal)); + + // execute motion, assuming it exists. + if let Some(p) = ProposalOf::::take(&proposal) { + let origin = RawOrigin::Members(voting.threshold, seats).into(); + let ok = p.dispatch(origin).is_ok(); + Self::deposit_event(RawEvent::Executed(proposal, ok)); + } + } else { + // disapproved + ProposalOf::::remove(&proposal); + Self::deposit_event(RawEvent::Disapproved(proposal)); + } + + // remove vote + Voting::::remove(&proposal); + Proposals::::mutate(|proposals| proposals.retain(|h| h != &proposal)); + } } impl, I: Instance> ChangeMembers for Module { - fn change_members_sorted(_incoming: &[T::AccountId], outgoing: &[T::AccountId], new: &[T::AccountId]) { + fn change_members_sorted( + _incoming: &[T::AccountId], + outgoing: &[T::AccountId], + new: &[T::AccountId], + ) { // remove accounts from all current voting in motions. let mut outgoing = outgoing.to_vec(); outgoing.sort_unstable(); @@ -302,7 +397,12 @@ impl, I: Instance> ChangeMembers for Module { } ); } - >::put(new); + Members::::put(new); + Prime::::kill(); + } + + fn set_prime(prime: Option) { + Prime::::set(prime); } } @@ -415,6 +515,7 @@ mod tests { pub const MaximumBlockWeight: Weight = 1024; pub const MaximumBlockLength: u32 = 2 * 1024; pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const MotionDuration: u64 = 3; } impl frame_system::Trait for Test { type Origin = Origin; @@ -435,17 +536,19 @@ mod tests { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl Trait for Test { type Origin = Origin; type Proposal = Call; type Event = Event; + type MotionDuration = MotionDuration; } impl Trait for Test { type Origin = Origin; type Proposal = Call; type Event = Event; + type MotionDuration = MotionDuration; } pub type Block = sp_runtime::generic::Block; @@ -486,22 +589,101 @@ mod tests { Call::System(frame_system::Call::remark(value.encode())) } + #[test] + fn close_works() { + make_ext().execute_with(|| { + System::set_block_number(1); + let proposal = make_proposal(42); + let hash = BlakeTwo256::hash_of(&proposal); + + assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); + + System::set_block_number(3); + assert_noop!( + Collective::close(Origin::signed(4), hash.clone(), 0), + Error::::TooEarly + ); + + System::set_block_number(4); + assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); + + let record = |event| EventRecord { phase: Phase::Finalization, event, topics: vec![] }; + assert_eq!(System::events(), vec![ + record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), + record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 2, 1))), + record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))) + ]); + }); + } + + #[test] + fn close_with_prime_works() { + make_ext().execute_with(|| { + System::set_block_number(1); + let proposal = make_proposal(42); + let hash = BlakeTwo256::hash_of(&proposal); + 
assert_ok!(Collective::set_members(Origin::ROOT, vec![1, 2, 3], Some(3))); + + assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); + + System::set_block_number(4); + assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); + + let record = |event| EventRecord { phase: Phase::Finalization, event, topics: vec![] }; + assert_eq!(System::events(), vec![ + record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), + record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 2, 1))), + record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))) + ]); + }); + } + + #[test] + fn close_with_voting_prime_works() { + make_ext().execute_with(|| { + System::set_block_number(1); + let proposal = make_proposal(42); + let hash = BlakeTwo256::hash_of(&proposal); + assert_ok!(Collective::set_members(Origin::ROOT, vec![1, 2, 3], Some(1))); + + assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); + + System::set_block_number(4); + assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); + + let record = |event| EventRecord { phase: Phase::Finalization, event, topics: vec![] }; + assert_eq!(System::events(), vec![ + record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), + record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), + record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 3, 0))), + record(Event::collective_Instance1(RawEvent::Approved(hash.clone()))), + record(Event::collective_Instance1(RawEvent::Executed(hash.clone(), false))) + ]); + }); + } + #[test] fn removal_of_old_voters_votes_works() { make_ext().execute_with(|| { System::set_block_number(1); let proposal = make_proposal(42); let hash = BlakeTwo256::hash_of(&proposal); + let end = 4; assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![] }) + Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![] }) + Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) ); let proposal = make_proposal(69); @@ -510,12 +692,12 @@ mod tests { assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3] }) + Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); Collective::change_members_sorted(&[], &[3], &[2, 4]); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![] }) + Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) ); }); } @@ -526,16 +708,17 @@ mod tests { System::set_block_number(1); let proposal = make_proposal(42); let hash = BlakeTwo256::hash_of(&proposal); + let end = 4; assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, 
true)); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![] }) + Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) ); - assert_ok!(Collective::set_members(Origin::ROOT, vec![2, 3, 4])); + assert_ok!(Collective::set_members(Origin::ROOT, vec![2, 3, 4], None)); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![] }) + Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) ); let proposal = make_proposal(69); @@ -544,12 +727,12 @@ mod tests { assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3] }) + Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) ); - assert_ok!(Collective::set_members(Origin::ROOT, vec![2, 4])); + assert_ok!(Collective::set_members(Origin::ROOT, vec![2, 4], None)); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![] }) + Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) ); }); } @@ -560,12 +743,13 @@ mod tests { System::set_block_number(1); let proposal = make_proposal(42); let hash = proposal.blake2_256().into(); + let end = 4; assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); assert_eq!(Collective::proposals(), vec![hash]); assert_eq!(Collective::proposal_of(&hash), Some(proposal)); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![1], nays: vec![] }) + Some(Votes { index: 0, threshold: 3, ayes: vec![1], nays: vec![], end }) ); assert_eq!(System::events(), vec![ @@ -629,10 +813,11 @@ mod tests { System::set_block_number(1); let proposal = make_proposal(42); let hash: H256 = proposal.blake2_256().into(); + let end = 4; assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()))); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![] }) + Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end }) ); assert_noop!( Collective::vote(Origin::signed(1), hash.clone(), 0, true), @@ -641,7 +826,7 @@ mod tests { assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, false)); assert_eq!( Collective::voting(&hash), - Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1] }) + Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1], end }) ); assert_noop!( Collective::vote(Origin::signed(1), hash.clone(), 0, false), diff --git a/frame/contracts/Cargo.toml b/frame/contracts/Cargo.toml index 159e9f9d0c199464d13f044b4ea92a475a38323a..86f39f8a82b94d65e2da3e8535174d2ecde4ea77 100644 --- a/frame/contracts/Cargo.toml +++ b/frame/contracts/Cargo.toml @@ -1,32 +1,35 @@ [package] name = "pallet-contracts" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for WASM contracts" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } pwasm-utils = { version = "0.12.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } 
parity-wasm = { version = "0.41.0", default-features = false } wasmi-validation = { version = "0.3.0", default-features = false } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-sandbox = { version = "0.8.0", default-features = false, path = "../../primitives/sandbox" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "common" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-sandbox = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/sandbox" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-contracts-primitives = { version = "2.0.0-alpha.2", default-features = false, path = "common" } [dev-dependencies] wabt = "0.9.2" assert_matches = "1.3.0" hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-timestamp = { version = "2.0.0", path = "../timestamp" } -pallet-randomness-collective-flip = { version = "2.0.0", path = "../randomness-collective-flip" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +pallet-timestamp = { version = "2.0.0-alpha.2", path = "../timestamp" } +pallet-randomness-collective-flip = { version = "2.0.0-alpha.2", path = "../randomness-collective-flip" } [features] default = ["std"] diff --git a/frame/contracts/common/Cargo.toml b/frame/contracts/common/Cargo.toml index 6e4ee050bd1978b3da839e5f2d07a68192d05390..0d009e7c82b41285fe12abdbe68ac07343c80985 100644 --- a/frame/contracts/common/Cargo.toml +++ b/frame/contracts/common/Cargo.toml @@ -1,14 +1,18 @@ [package] name = "pallet-contracts-primitives" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "A crate that hosts a common definitions that are relevant for the pallet-contracts." [dependencies] # This crate should not rely on any of the frame primitives. 
-codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } [features] default = ["std"] diff --git a/frame/contracts/rpc/Cargo.toml b/frame/contracts/rpc/Cargo.toml index d59260d11f536da807679531e4350edaf9d4e8d8..092d049c5414ba7964860732fd8f8b3e2e3022a1 100644 --- a/frame/contracts/rpc/Cargo.toml +++ b/frame/contracts/rpc/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-contracts-rpc" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Node-specific RPC methods for interaction with contracts." [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0" } +codec = { package = "parity-scale-codec", version = "1.2.0" } jsonrpc-core = "14.0.3" jsonrpc-core-client = "14.0.3" jsonrpc-derive = "14.0.3" -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0", path = "../../../primitives/rpc" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0-alpha.2", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -pallet-contracts-primitives = { version = "2.0.0", path = "../common" } -pallet-contracts-rpc-runtime-api = { version = "0.8.0", path = "./runtime-api" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +pallet-contracts-primitives = { version = "2.0.0-alpha.2", path = "../common" } +pallet-contracts-rpc-runtime-api = { version = "0.8.0-alpha.2", path = "./runtime-api" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/contracts/rpc/runtime-api/Cargo.toml b/frame/contracts/rpc/runtime-api/Cargo.toml index dad9b92f6a30891b30f5307a3d732b87f05b9f49..8435a6c4238c9e07d60c79a49aed9ae282993a76 100644 --- a/frame/contracts/rpc/runtime-api/Cargo.toml +++ b/frame/contracts/rpc/runtime-api/Cargo.toml @@ -1,16 +1,19 @@ [package] name = "pallet-contracts-rpc-runtime-api" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Runtime API definition required by Contracts RPC extensions." 
[dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../../primitives/runtime" } -pallet-contracts-primitives = { version = "2.0.0", default-features = false, path = "../../common" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/runtime" } +pallet-contracts-primitives = { version = "2.0.0-alpha.2", default-features = false, path = "../../common" } [features] default = ["std"] diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 5204f1003a6c5920714c483a4329de9c1cfdf386..374c55c374d7bf26c0603017f3838b9c0a6a0bb7 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -151,7 +151,7 @@ impl AccountDb for DirectAccountDb { let exists = !T::Currency::total_balance(&address).is_zero(); total_imbalance = total_imbalance.merge(imbalance); if existed && !exists { - // Account killed. This will ultimately lead to calling `OnReapAccount` callback + // Account killed. This will ultimately lead to calling `OnKilledAccount` callback // which will make removal of CodeHashOf and AccountStorage for this account. // In order to avoid writing over the deleted properties we `continue` here. continue; diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index c8572daaa43d487efd84725b26a6492beaa63794..362f15f3aae795ef44b346285de79cd557291a23 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -250,7 +250,11 @@ pub fn refund_unused_gas( pub fn approx_gas_for_balance(gas_price: Balance, balance: Balance) -> Gas where Balance: AtLeast32Bit { - (balance / gas_price).saturated_into::() + if gas_price.is_zero() { + Zero::zero() + } else { + (balance / gas_price).saturated_into::() + } } /// A simple utility macro that helps to match against a @@ -294,7 +298,7 @@ macro_rules! match_tokens { #[cfg(test)] mod tests { use super::{GasMeter, Token}; - use crate::tests::Test; + use crate::{tests::Test, gas::approx_gas_for_balance}; /// A trivial token that charges the specified number of gas units. #[derive(Copy, Clone, PartialEq, Eq, Debug)] @@ -382,4 +386,22 @@ mod tests { let mut gas_meter = GasMeter::::with_limit(25, 10); assert!(!gas_meter.charge(&(), SimpleToken(25)).is_out_of_gas()); } + + // A unit test for `fn approx_gas_for_balance()`, and makes + // sure setting gas_price 0 does not cause `div by zero` error. 
+ #[test] + fn approx_gas_for_balance_works() { + let tests = vec![ + (approx_gas_for_balance(0_u64, 123), 0), + (approx_gas_for_balance(0_u64, 456), 0), + (approx_gas_for_balance(1_u64, 123), 123), + (approx_gas_for_balance(1_u64, 456), 456), + (approx_gas_for_balance(100_u64, 900), 9), + (approx_gas_for_balance(123_u64, 900), 7), + ]; + + for (lhs, rhs) in tests { + assert_eq!(lhs, rhs); + } + } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 42cbaa3a7c2af2e7251d885c1e55c2187fa145b8..93e470a51bb83fbccb28dba4709985c22d7362e8 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -125,7 +125,7 @@ use frame_support::{ parameter_types, IsSubType, weights::DispatchInfo, }; -use frame_support::traits::{OnReapAccount, OnUnbalanced, Currency, Get, Time, Randomness}; +use frame_support::traits::{OnKilledAccount, OnUnbalanced, Currency, Get, Time, Randomness}; use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; use sp_core::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX; use pallet_contracts_primitives::{RentProjection, ContractAccessError}; @@ -254,7 +254,7 @@ where let mut buf = Vec::new(); storage_root.using_encoded(|encoded| buf.extend_from_slice(encoded)); buf.extend_from_slice(code_hash.as_ref()); - RawTombstoneContractInfo(Hasher::hash(&buf[..]), PhantomData) + RawTombstoneContractInfo(::hash(&buf[..]), PhantomData) } } @@ -941,8 +941,12 @@ decl_storage! { } } -impl OnReapAccount for Module { - fn on_reap_account(who: &T::AccountId) { +// TODO: this should be removed in favour of a self-destruct contract host function allowing the +// contract to delete all storage and the `ContractInfoOf` key and transfer remaining balance to +// some other account. As it stands, it's an economic insecurity on any smart-contract chain. 
+// https://github.com/paritytech/substrate/issues/4952 +impl OnKilledAccount for Module { + fn on_killed_account(who: &T::AccountId) { if let Some(ContractInfo::Alive(info)) = >::take(who) { child::kill_storage(&info.trie_id, info.child_trie_unique_id()); } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index ddd532334c158181feb8d3041f4f6984d45db727..51da3e8d1ff97ee0048290d073ac53e6e10f291f 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -117,7 +117,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = (Balances, Contracts); + type OnKilledAccount = Contracts; } impl pallet_balances::Trait for Test { type Balance = u64; @@ -1606,7 +1606,7 @@ fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: assert_eq!(System::events(), vec![ EventRecord { phase: Phase::ApplyExtrinsic(0), - event: MetaEvent::system(system::RawEvent::ReapedAccount(DJANGO)), + event: MetaEvent::system(system::RawEvent::KilledAccount(DJANGO)), topics: vec![], }, EventRecord { @@ -2079,6 +2079,22 @@ fn deploy_and_call_other_contract() { }); } +#[test] +fn deploy_works_without_gas_price() { + let (wasm, code_hash) = compile_module::(CODE_GET_RUNTIME_STORAGE).unwrap(); + ExtBuilder::default().existential_deposit(50).gas_price(0).build().execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + vec![], + )); + }); +} + const CODE_SELF_DESTRUCT: &str = r#" (module (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) diff --git a/frame/democracy/Cargo.toml b/frame/democracy/Cargo.toml index 2428d2c1544dcf7309294f9238b2031cb8cd3210..d2938956bd015333e28c7bbd67fc4a61a1bea282 100644 --- a/frame/democracy/Cargo.toml +++ b/frame/democracy/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-democracy" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for democracy" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, 
path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +sp-storage = { version = "2.0.0-alpha.2", path = "../../primitives/storage" } hex-literal = "0.2.1" [features] diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index c2c44918b036cfb05974c7f748870b36417d3754..5632103ab317e684d9bf5b70e7258da2873b25ea 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -14,7 +14,140 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -//! Democratic system: Handles administration of general stakeholder voting. +//! # Democracy Pallet +//! +//! - [`democracy::Trait`](./trait.Trait.html) +//! - [`Call`](./enum.Call.html) +//! +//! ## Overview +//! +//! The Democracy pallet handles the administration of general stakeholder voting. +//! +//! There are two different queues that a proposal can be added to before it +//! becomes a referendum, 1) the proposal queue consisting of all public proposals +//! and 2) the external queue consisting of a single proposal that originates +//! from one of the _external_ origins (such as a collective group). +//! +//! Every launch period - a length defined in the runtime - the Democracy pallet +//! launches a referendum from a proposal that it takes from either the proposal +//! queue or the external queue in turn. Any token holder in the system can vote +//! on referenda. The voting system +//! uses time-lock voting by allowing the token holder to set their _conviction_ +//! behind a vote. The conviction will dictate the length of time the tokens +//! will be locked, as well as the multiplier that scales the vote power. +//! +//! ### Terminology +//! +//! - **Enactment Period:** The minimum period of locking and the period between a proposal being +//! approved and enacted. +//! - **Lock Period:** A period of time after proposal enactment that the tokens of _winning_ voters +//! will be locked. +//! - **Conviction:** An indication of a voter's strength of belief in their vote. An increase +//! of one in conviction indicates that a token holder is willing to lock their tokens for twice +//! as many lock periods after enactment. +//! - **Vote:** A value that can either be in approval ("Aye") or rejection ("Nay") +//! of a particular referendum. +//! - **Proposal:** A submission to the chain that represents an action that a proposer (either an +//! account or an external origin) suggests that the system adopt. +//! - **Referendum:** A proposal that is in the process of being voted on for +//! either acceptance or rejection as a change to the system. +//! - **Proxy:** An account that votes on behalf of a separate "Stash" account +//! that holds the funds. +//! - **Delegation:** The act of granting your voting power to the decisions of another account. +//! +//! ### Adaptive Quorum Biasing +//! +//! A _referendum_ can be either simple majority-carries in which 50%+1 of the +//! votes decide the outcome or _adaptive quorum biased_. Adaptive quorum biasing +//! makes the threshold for passing or rejecting a referendum higher or lower +//! depending on how the referendum was originally proposed. There are two types of +//! 
adaptive quorum biasing: 1) _positive turnout bias_ makes a referendum +//! require a super-majority to pass that decreases as turnout increases and +//! 2) _negative turnout bias_ makes a referendum require a super-majority to +//! reject that decreases as turnout increases. Another way to think about the +//! quorum biasing is that _positive bias_ referendums will be rejected by +//! default and _negative bias_ referendums get passed by default. +//! +//! ## Interface +//! +//! ### Dispatchable Functions +//! +//! #### Public +//! +//! These calls can be made from any externally held account capable of creating +//! a signed extrinsic. +//! +//! - `propose` - Submits a sensitive action, represented as a hash. +//! Requires a deposit. +//! - `second` - Signals agreement with a proposal, moves it higher on the +//! proposal queue, and requires a matching deposit to the original. +//! - `vote` - Votes in a referendum, either the vote is "Aye" to enact the +//! proposal or "Nay" to keep the status quo. +//! - `proxy_vote` - Votes in a referendum on behalf of a stash account. +//! - `activate_proxy` - Activates a proxy that is already open to the sender. +//! - `close_proxy` - Clears the proxy status, called by the proxy. +//! - `deactivate_proxy` - Deactivates a proxy back to the open status, called by +//! the stash. +//! - `open_proxy` - Opens a proxy account on behalf of the sender. +//! - `delegate` - Delegates the voting power (tokens * conviction) to another +//! account. +//! - `undelegate` - Stops the delegation of voting power to another account. +//! - `note_preimage` - Registers the preimage for an upcoming proposal, requires +//! a deposit that is returned once the proposal is enacted. +//! - `note_imminent_preimage` - Registers the preimage for an upcoming proposal. +//! Does not require a deposit, but the proposal must be in the dispatch queue. +//! - `reap_preimage` - Removes the preimage for an expired proposal. Will only +//! work under the condition that it's the same account that noted it and +//! after the voting period, OR it's a different account after the enactment period. +//! - `unlock` - Unlocks tokens that have an expired lock. +//! +//! #### Cancellation Origin +//! +//! This call can only be made by the `CancellationOrigin`. +//! +//! - `emergency_cancel` - Schedules an emergency cancellation of a referendum. +//! Can only happen once to a specific referendum. +//! +//! #### ExternalOrigin +//! +//! This call can only be made by the `ExternalOrigin`. +//! +//! - `external_propose` - Schedules a proposal to become a referendum once it is is legal +//! for an externally proposed referendum. +//! +//! #### External Majority Origin +//! +//! This call can only be made by the `ExternalMajorityOrigin`. +//! +//! - `external_propose_majority` - Schedules a proposal to become a majority-carries +//! referendum once it is legal for an externally proposed referendum. +//! +//! #### External Default Origin +//! +//! This call can only be made by the `ExternalDefaultOrigin`. +//! +//! - `external_propose_default` - Schedules a proposal to become a negative-turnout-bias +//! referendum once it is legal for an externally proposed referendum. +//! +//! #### Fast Track Origin +//! +//! This call can only be made by the `FastTrackOrigin`. +//! +//! - `fast_track` - Schedules the current externally proposed proposal that +//! is "majority-carries" to become a referendum immediately. +//! +//! #### Veto Origin +//! +//! This call can only be made by the `VetoOrigin`. +//! +//! 
- `veto_external` - Vetoes and blacklists the external proposal hash. +//! +//! #### Root +//! +//! - `cancel_referendum` - Removes a referendum. +//! - `cancel_queued` - Cancels a proposal that is queued for enactment. +//! - `clear_public_proposal` - Removes all public proposals. + #![recursion_limit="128"] #![cfg_attr(not(feature = "std"), no_std)] @@ -30,7 +163,7 @@ use frame_support::{ weights::SimpleDispatchInfo, traits::{ Currency, ReservableCurrency, LockableCurrency, WithdrawReason, LockIdentifier, Get, - OnReapAccount, OnUnbalanced, BalanceStatus + OnUnbalanced, BalanceStatus } }; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -260,6 +393,24 @@ impl ReferendumInfo } } +/// State of a proxy voting account. +#[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] +pub enum ProxyState { + /// Account is open to becoming a proxy but is not yet assigned. + Open(AccountId), + /// Account is actively being a proxy. + Active(AccountId), +} + +impl ProxyState { + fn as_active(self) -> Option { + match self { + ProxyState::Active(a) => Some(a), + ProxyState::Open(_) => None, + } + } +} + decl_storage! { trait Store for Module as Democracy { /// The number of (public) proposals that have been made so far. @@ -299,7 +450,7 @@ decl_storage! { /// Who is able to vote for whom. Value is the fund-holding account, key is the /// vote-transaction-sending account. - pub Proxy get(fn proxy): map hasher(blake2_256) T::AccountId => Option; + pub Proxy get(fn proxy): map hasher(blake2_256) T::AccountId => Option>; /// Get the account (and lock periods) to which another account is delegating vote. pub Delegations get(fn delegations): @@ -423,6 +574,12 @@ decl_error! { NotLocked, /// The lock on the account to be unlocked has not yet expired. NotExpired, + /// A proxy-pairing was attempted to an account that was not open. + NotOpen, + /// A proxy-pairing was attempted to an account that was open to another account. + WrongOpen, + /// A proxy-de-pairing was attempted to an account that was not active. + NotActive } } @@ -458,8 +615,16 @@ decl_module! { /// Propose a sensitive action to be taken. /// + /// The dispatch origin of this call must be _Signed_ and the sender must + /// have funds to cover the deposit. + /// + /// - `proposal_hash`: The hash of the proposal preimage. + /// - `value`: The amount of deposit (must be at least `MinimumDeposit`). + /// + /// Emits `Proposed`. + /// /// # - /// - O(1). + /// - `O(1)`. /// - Two DB changes, one DB entry. /// # #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] @@ -481,10 +646,15 @@ decl_module! { Self::deposit_event(RawEvent::Proposed(index, value)); } - /// Propose a sensitive action to be taken. + /// Signals agreement with a particular proposal. + /// + /// The dispatch origin of this call must be _Signed_ and the sender + /// must have funds to cover the deposit, equal to the original deposit. + /// + /// - `proposal`: The index of the proposal to second. /// /// # - /// - O(1). + /// - `O(1)`. /// - One DB entry. /// # #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] @@ -500,8 +670,13 @@ decl_module! { /// Vote in a referendum. If `vote.is_aye()`, the vote is to enact the proposal; /// otherwise it is a vote to keep the status quo. /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `ref_index`: The index of the referendum to vote for. + /// - `vote`: The vote configuration. + /// /// # - /// - O(1). + /// - `O(1)`. /// - One DB change, one DB entry. 
/// # #[weight = SimpleDispatchInfo::FixedNormal(200_000)] @@ -516,8 +691,13 @@ decl_module! { /// Vote in a referendum on behalf of a stash. If `vote.is_aye()`, the vote is to enact /// the proposal; otherwise it is a vote to keep the status quo. /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `ref_index`: The index of the referendum to proxy vote for. + /// - `vote`: The vote configuration. + /// /// # - /// - O(1). + /// - `O(1)`. /// - One DB change, one DB entry. /// # #[weight = SimpleDispatchInfo::FixedNormal(200_000)] @@ -525,12 +705,21 @@ decl_module! { #[compact] ref_index: ReferendumIndex, vote: Vote ) -> DispatchResult { - let who = Self::proxy(ensure_signed(origin)?).ok_or(Error::::NotProxy)?; - Self::do_vote(who, ref_index, vote) + let who = ensure_signed(origin)?; + let voter = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; + Self::do_vote(voter, ref_index, vote) } /// Schedule an emergency cancellation of a referendum. Cannot happen twice to the same /// referendum. + /// + /// The dispatch origin of this call must be `CancellationOrigin`. + /// + /// -`ref_index`: The index of the referendum to cancel. + /// + /// # + /// - Depends on size of storage vec `VotersFor` for this referendum. + /// # #[weight = SimpleDispatchInfo::FixedOperational(500_000)] fn emergency_cancel(origin, ref_index: ReferendumIndex) { T::CancellationOrigin::ensure_origin(origin)?; @@ -545,6 +734,15 @@ decl_module! { /// Schedule a referendum to be tabled once it is legal to schedule an external /// referendum. + /// + /// The dispatch origin of this call must be `ExternalOrigin`. + /// + /// - `proposal_hash`: The preimage hash of the proposal. + /// + /// # + /// - `O(1)`. + /// - One DB change. + /// # #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] fn external_propose(origin, proposal_hash: T::Hash) { T::ExternalOrigin::ensure_origin(origin)?; @@ -561,8 +759,17 @@ decl_module! { /// Schedule a majority-carries referendum to be tabled next once it is legal to schedule /// an external referendum. /// + /// The dispatch of this call must be `ExternalMajorityOrigin`. + /// + /// - `proposal_hash`: The preimage hash of the proposal. + /// /// Unlike `external_propose`, blacklisting has no effect on this and it may replace a /// pre-scheduled `external_propose` call. + /// + /// # + /// - `O(1)`. + /// - One DB change. + /// # #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] fn external_propose_majority(origin, proposal_hash: T::Hash) { T::ExternalMajorityOrigin::ensure_origin(origin)?; @@ -572,8 +779,17 @@ decl_module! { /// Schedule a negative-turnout-bias referendum to be tabled next once it is legal to /// schedule an external referendum. /// + /// The dispatch of this call must be `ExternalDefaultOrigin`. + /// + /// - `proposal_hash`: The preimage hash of the proposal. + /// /// Unlike `external_propose`, blacklisting has no effect on this and it may replace a /// pre-scheduled `external_propose` call. + /// + /// # + /// - `O(1)`. + /// - One DB change. + /// # #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] fn external_propose_default(origin, proposal_hash: T::Hash) { T::ExternalDefaultOrigin::ensure_origin(origin)?; @@ -584,11 +800,21 @@ decl_module! { /// immediately. If there is no externally-proposed referendum currently, or if there is one /// but it is not a majority-carries referendum then it fails. /// + /// The dispatch of this call must be `FastTrackOrigin`. 
+ /// /// - `proposal_hash`: The hash of the current external proposal. /// - `voting_period`: The period that is allowed for voting on this proposal. Increased to /// `EmergencyVotingPeriod` if too low. /// - `delay`: The number of block after voting has ended in approval and this should be /// enacted. This doesn't have a minimum amount. + /// + /// Emits `Started`. + /// + /// # + /// - One DB clear. + /// - One DB change. + /// - One extra DB entry. + /// # #[weight = SimpleDispatchInfo::FixedNormal(200_000)] fn fast_track(origin, proposal_hash: T::Hash, @@ -611,6 +837,19 @@ decl_module! { } /// Veto and blacklist the external proposal hash. + /// + /// The dispatch origin of this call must be `VetoOrigin`. + /// + /// - `proposal_hash`: The preimage hash of the proposal to veto and blacklist. + /// + /// Emits `Vetoed`. + /// + /// # + /// - Two DB entries. + /// - One DB clear. + /// - Performs a binary search on `existing_vetoers` which should not + /// be very large. + /// # #[weight = SimpleDispatchInfo::FixedNormal(200_000)] fn veto_external(origin, proposal_hash: T::Hash) { let who = T::VetoOrigin::ensure_origin(origin)?; @@ -636,6 +875,14 @@ decl_module! { } /// Remove a referendum. + /// + /// The dispatch origin of this call must be _Root_. + /// + /// - `ref_index`: The index of the referendum to cancel. + /// + /// # + /// - `O(1)`. + /// # #[weight = SimpleDispatchInfo::FixedOperational(10_000)] fn cancel_referendum(origin, #[compact] ref_index: ReferendumIndex) { ensure_root(origin)?; @@ -643,6 +890,14 @@ decl_module! { } /// Cancel a proposal queued for enactment. + /// + /// The dispatch origin of this call must be _Root_. + /// + /// - `which`: The index of the referendum to cancel. + /// + /// # + /// - One DB change. + /// # #[weight = SimpleDispatchInfo::FixedOperational(10_000)] fn cancel_queued(origin, which: ReferendumIndex) { ensure_root(origin)?; @@ -659,46 +914,89 @@ decl_module! { } } - /// Specify a proxy. Called by the stash. + /// Specify a proxy that is already open to us. Called by the stash. + /// + /// NOTE: Used to be called `set_proxy`. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `proxy`: The account that will be activated as proxy. /// /// # /// - One extra DB entry. /// # #[weight = SimpleDispatchInfo::FixedNormal(100_000)] - fn set_proxy(origin, proxy: T::AccountId) { + fn activate_proxy(origin, proxy: T::AccountId) { let who = ensure_signed(origin)?; - ensure!(!>::contains_key(&proxy), Error::::AlreadyProxy); - >::insert(proxy, who) + Proxy::::try_mutate(&proxy, |a| match a.take() { + None => Err(Error::::NotOpen), + Some(ProxyState::Active(_)) => Err(Error::::AlreadyProxy), + Some(ProxyState::Open(x)) if &x == &who => { + *a = Some(ProxyState::Active(who)); + Ok(()) + } + Some(ProxyState::Open(_)) => Err(Error::::WrongOpen), + })?; } /// Clear the proxy. Called by the proxy. /// + /// NOTE: Used to be called `resign_proxy`. + /// + /// The dispatch origin of this call must be _Signed_. + /// /// # /// - One DB clear. /// # #[weight = SimpleDispatchInfo::FixedNormal(100_000)] - fn resign_proxy(origin) { + fn close_proxy(origin) { let who = ensure_signed(origin)?; - >::remove(who); + Proxy::::mutate(&who, |a| { + if a.is_some() { + system::Module::::dec_ref(&who); + } + *a = None; + }); } - /// Clear the proxy. Called by the stash. + /// Deactivate the proxy, but leave open to this account. Called by the stash. + /// + /// The proxy must already be active. + /// + /// NOTE: Used to be called `remove_proxy`. 
+ /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `proxy`: The account that will be deactivated as proxy. /// /// # /// - One DB clear. /// # #[weight = SimpleDispatchInfo::FixedNormal(100_000)] - fn remove_proxy(origin, proxy: T::AccountId) { + fn deactivate_proxy(origin, proxy: T::AccountId) { let who = ensure_signed(origin)?; - ensure!( - &Self::proxy(&proxy).ok_or(Error::::NotProxy)? == &who, - Error::::WrongProxy, - ); - >::remove(proxy); + Proxy::::try_mutate(&proxy, |a| match a.take() { + None | Some(ProxyState::Open(_)) => Err(Error::::NotActive), + Some(ProxyState::Active(x)) if &x == &who => { + *a = Some(ProxyState::Open(who)); + Ok(()) + } + Some(ProxyState::Active(_)) => Err(Error::::WrongProxy), + })?; } /// Delegate vote. /// + /// Currency is locked indefinitely for as long as it's delegated. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `to`: The account to make a delegate of the sender. + /// - `conviction`: The conviction that will be attached to the delegated + /// votes. + /// + /// Emits `Delegated`. + /// /// # /// - One extra DB entry. /// # @@ -719,6 +1017,14 @@ decl_module! { /// Undelegate vote. /// + /// Must be sent from an account that has called delegate previously. + /// The tokens will be reduced from an indefinite lock to the maximum + /// possible according to the conviction of the prior delegation. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// Emits `Undelegated`. + /// /// # /// - O(1). /// # @@ -740,7 +1046,14 @@ decl_module! { Self::deposit_event(RawEvent::Undelegated(who)); } - /// Veto and blacklist the proposal hash. Must be from Root origin. + /// Clears all public proposals. + /// + /// The dispatch origin of this call must be _Root_. + /// + /// # + /// - `O(1)`. + /// - One DB clear. + /// # #[weight = SimpleDispatchInfo::FixedNormal(10_000)] fn clear_public_proposals(origin) { ensure_root(origin)?; @@ -750,6 +1063,17 @@ decl_module! { /// Register the preimage for an upcoming proposal. This doesn't require the proposal to be /// in the dispatch queue but does require a deposit, returned once enacted. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `encoded_proposal`: The preimage of a proposal. + /// + /// Emits `PreimageNoted`. + /// + /// # + /// - Dependent on the size of `encoded_proposal` but protected by a + /// required deposit. + /// # #[weight = SimpleDispatchInfo::FixedNormal(100_000)] fn note_preimage(origin, encoded_proposal: Vec) { let who = ensure_signed(origin)?; @@ -768,6 +1092,16 @@ decl_module! { /// Register the preimage for an upcoming proposal. This requires the proposal to be /// in the dispatch queue. No deposit is needed. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `encoded_proposal`: The preimage of a proposal. + /// + /// Emits `PreimageNoted`. + /// + /// # + /// - Dependent on the size of `encoded_proposal`. + /// # #[weight = SimpleDispatchInfo::FixedNormal(100_000)] fn note_imminent_preimage(origin, encoded_proposal: Vec) { let who = ensure_signed(origin)?; @@ -785,9 +1119,19 @@ decl_module! { /// Remove an expired proposal preimage and collect the deposit. /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `proposal_hash`: The preimage hash of a proposal. + /// /// This will only work after `VotingPeriod` blocks from the time that the preimage was /// noted, if it's the same account doing it. 
If it's a different account, then it'll only /// work an additional `EnactmentPeriod` later. + /// + /// Emits `PreimageReaped`. + /// + /// # + /// - One DB clear. + /// # #[weight = SimpleDispatchInfo::FixedNormal(10_000)] fn reap_preimage(origin, proposal_hash: T::Hash) { let who = ensure_signed(origin)?; @@ -807,6 +1151,17 @@ decl_module! { Self::deposit_event(RawEvent::PreimageReaped(proposal_hash, old, deposit, who)); } + /// Unlock tokens that have an expired lock. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `target`: The account to remove the lock on. + /// + /// Emits `Unlocked`. + /// + /// # + /// - `O(1)`. + /// # #[weight = SimpleDispatchInfo::FixedNormal(10_000)] fn unlock(origin, target: T::AccountId) { ensure_signed(origin)?; @@ -818,6 +1173,30 @@ decl_module! { Locks::::remove(&target); Self::deposit_event(RawEvent::Unlocked(target)); } + + /// Become a proxy. + /// + /// This must be called prior to a later `activate_proxy`. + /// + /// Origin must be a Signed. + /// + /// - `target`: The account whose votes will later be proxied. + /// + /// `close_proxy` must be called before the account can be destroyed. + /// + /// # + /// - One extra DB entry. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000)] + fn open_proxy(origin, target: T::AccountId) { + let who = ensure_signed(origin)?; + Proxy::::mutate(&who, |a| { + if a.is_none() { + system::Module::::inc_ref(&who); + } + *a = Some(ProxyState::Open(target)); + }); + } } } @@ -935,7 +1314,12 @@ impl Module { #[cfg(feature = "std")] pub fn force_proxy(stash: T::AccountId, proxy: T::AccountId) { - >::insert(proxy, stash) + Proxy::::mutate(&proxy, |o| { + if o.is_none() { + system::Module::::inc_ref(&proxy); + } + *o = Some(ProxyState::Active(stash)) + }) } /// Start a referendum. @@ -989,7 +1373,6 @@ impl Module { fn clear_referendum(ref_index: ReferendumIndex) { >::remove(ref_index); - LowestUnbaked::mutate(|i| if *i == ref_index { *i += 1; let end = ReferendumCount::get(); @@ -1162,12 +1545,6 @@ impl Module { } } -impl OnReapAccount for Module { - fn on_reap_account(who: &T::AccountId) { - >::remove(who) - } -} - #[cfg(test)] mod tests { use super::*; @@ -1178,7 +1555,7 @@ mod tests { }; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup, Bounded, BadOrigin, OnInitialize}, + traits::{BlakeTwo256, IdentityLookup, Bounded, BadOrigin, OnRuntimeUpgrade}, testing::Header, Perbill, }; use pallet_balances::{BalanceLock, Error as BalancesError}; @@ -1229,7 +1606,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; @@ -1336,7 +1713,7 @@ mod tests { ]; s.top = data.into_iter().collect(); sp_io::TestExternalities::new(s).execute_with(|| { - Balances::on_initialize(1); + Balances::on_runtime_upgrade(); assert_eq!(Balances::free_balance(1), 5); assert_eq!(Balances::reserved_balance(1), 5); assert_eq!(Balances::usable_balance(&1), 2); @@ -1968,29 +2345,45 @@ mod tests { fn proxy_should_work() { new_test_ext().execute_with(|| { assert_eq!(Democracy::proxy(10), None); - assert_ok!(Democracy::set_proxy(Origin::signed(1), 10)); - assert_eq!(Democracy::proxy(10), Some(1)); + assert!(System::allow_death(&10)); + + assert_noop!(Democracy::activate_proxy(Origin::signed(1), 10), Error::::NotOpen); + + assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); + assert!(!System::allow_death(&10)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); + + assert_noop!(Democracy::activate_proxy(Origin::signed(2), 10), Error::::WrongOpen); + assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); // Can't set when already set. - assert_noop!(Democracy::set_proxy(Origin::signed(2), 10), Error::::AlreadyProxy); + assert_noop!(Democracy::activate_proxy(Origin::signed(2), 10), Error::::AlreadyProxy); // But this works because 11 isn't proxying. - assert_ok!(Democracy::set_proxy(Origin::signed(2), 11)); - assert_eq!(Democracy::proxy(10), Some(1)); - assert_eq!(Democracy::proxy(11), Some(2)); + assert_ok!(Democracy::open_proxy(Origin::signed(11), 2)); + assert_ok!(Democracy::activate_proxy(Origin::signed(2), 11)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); + assert_eq!(Democracy::proxy(11), Some(ProxyState::Active(2))); // 2 cannot fire 1's proxy: - assert_noop!(Democracy::remove_proxy(Origin::signed(2), 10), Error::::WrongProxy); + assert_noop!(Democracy::deactivate_proxy(Origin::signed(2), 10), Error::::WrongProxy); - // 1 fires his proxy: - assert_ok!(Democracy::remove_proxy(Origin::signed(1), 10)); - assert_eq!(Democracy::proxy(10), None); - assert_eq!(Democracy::proxy(11), Some(2)); + // 1 deactivates their proxy: + assert_ok!(Democracy::deactivate_proxy(Origin::signed(1), 10)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); + // but the proxy account cannot be killed until the proxy is closed. + assert!(!System::allow_death(&10)); - // 11 resigns: - assert_ok!(Democracy::resign_proxy(Origin::signed(11))); + // and then 10 closes it completely: + assert_ok!(Democracy::close_proxy(Origin::signed(10))); assert_eq!(Democracy::proxy(10), None); + assert!(System::allow_death(&10)); + + // 11 just closes without 2's "permission". 
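+ // (Note: `close_proxy` only requires the proxy account's own signature; the stash that opened or activated the relationship does not need to sign off on closing it.)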
+ assert_ok!(Democracy::close_proxy(Origin::signed(11))); assert_eq!(Democracy::proxy(11), None); + assert!(System::allow_death(&11)); }); } @@ -2002,7 +2395,8 @@ mod tests { fast_forward_to(2); let r = 0; - assert_ok!(Democracy::set_proxy(Origin::signed(1), 10)); + assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); + assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); assert_ok!(Democracy::proxy_vote(Origin::signed(10), r, AYE)); assert_eq!(Democracy::voters_for(r), vec![1]); diff --git a/frame/elections-phragmen/Cargo.toml b/frame/elections-phragmen/Cargo.toml index f3cfc800f01c015738304e2281ba6180ffa4b339..62ae091e9a41dea8fa039e48d329d571659b3852 100644 --- a/frame/elections-phragmen/Cargo.toml +++ b/frame/elections-phragmen/Cargo.toml @@ -1,24 +1,27 @@ [package] name = "pallet-elections-phragmen" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME election pallet for PHRAGMEN" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-phragmen = { version = "2.0.0", default-features = false, path = "../../primitives/phragmen" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-phragmen = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/phragmen" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +substrate-test-utils = { version = "2.0.0-alpha.2", path = "../../test-utils" } serde = { version = "1.0.101" } [features] diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index f250e771216e601f4129bd8043d7e86d97791bc3..55b7b3f1280d30df19545143cb7a8972df52f71c 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -690,6 +690,7 @@ impl Module { // split new set into winners and runners up. let split_point = desired_seats.min(new_set_with_stake.len()); let mut new_members = (&new_set_with_stake[..split_point]).to_vec(); + let most_popular = new_members.first().map(|x| x.0.clone()); // save the runners up as-is. They are sorted based on desirability. 
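+ // (`most_popular`, captured above before any re-sorting, is the head of the stake-ordered new member set; it is handed to `T::ChangeMembers::set_prime` right after the membership change is reported below.)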
// sort and save the members. @@ -722,6 +723,7 @@ impl Module { &outgoing.clone(), &new_members_ids, ); + T::ChangeMembers::set_prime(most_popular); // outgoing candidates lose their bond. let mut to_burn_bond = outgoing.to_vec(); @@ -816,7 +818,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! { @@ -864,6 +866,7 @@ mod tests { thread_local! { pub static MEMBERS: RefCell> = RefCell::new(vec![]); + pub static PRIME: RefCell> = RefCell::new(None); } pub struct TestChangeMembers; @@ -898,6 +901,11 @@ mod tests { assert_eq!(old_plus_incoming, new_plus_outgoing); MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); + PRIME.with(|p| *p.borrow_mut() = None); + } + + fn set_prime(who: Option) { + PRIME.with(|p| *p.borrow_mut() = who); } } @@ -1250,6 +1258,30 @@ mod tests { }); } + #[test] + fn prime_works() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + + assert_ok!(Elections::vote(Origin::signed(1), vec![4, 3], 10)); + assert_ok!(Elections::vote(Origin::signed(2), vec![4], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::candidates(), vec![]); + + assert_ok!(Elections::vote(Origin::signed(3), vec![4, 5], 10)); + assert_eq!(PRIME.with(|p| *p.borrow()), Some(4)); + }); + } + #[test] fn cannot_vote_for_more_than_candidates() { ExtBuilder::default().build().execute_with(|| { diff --git a/frame/elections/Cargo.toml b/frame/elections/Cargo.toml index b7c98a65e15b45aa0a52113319102497236b58b5..e65f4e5d4627827380cbb06cc20732e0de40a5dc 100644 --- a/frame/elections/Cargo.toml +++ b/frame/elections/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-elections" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for elections" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = 
false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] hex-literal = "0.2.1" -pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } [features] default = ["std"] diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index c5af6ba0456b95a0b0fe23e72588adf89489d70b..b82e73d512aa6eb254a1eda22ff801b7ad768a95 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -56,7 +56,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! { diff --git a/frame/evm/Cargo.toml b/frame/evm/Cargo.toml index 66c809fa44c7ee76d148298e019fbade3aba8d60..76407d57e42846394039d3f9af9f5dfb70f2016d 100644 --- a/frame/evm/Cargo.toml +++ b/frame/evm/Cargo.toml @@ -1,21 +1,24 @@ [package] name = "pallet-evm" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME EVM contracts pallet" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } +pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../balances" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } primitive-types = { version = "0.6.2", default-features = false, features = ["rlp"] } rlp = { version = "0.4", default-features = false } evm = { version = "0.15", default-features = false } diff --git a/frame/example-offchain-worker/Cargo.toml 
b/frame/example-offchain-worker/Cargo.toml new file mode 100644 index 0000000000000000000000000000000000000000..ff8d780c3cff326c3b1fb749270dc8ec09c7043a --- /dev/null +++ b/frame/example-offchain-worker/Cargo.toml @@ -0,0 +1,33 @@ +[package] +name = "pallet-example-offchain-worker" +version = "2.0.0-alpha.3" +authors = ["Parity Technologies "] +edition = "2018" +license = "Unlicense" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME example pallet for offchain worker" + +[dependencies] +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +serde = { version = "1.0.101", optional = true } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +serde_json = { version = "1.0.46", default-features = false, features = ["alloc"] } + +[features] +default = ["std"] +std = [ + "codec/std", + "frame-support/std", + "frame-system/std", + "serde", + "sp-core/std", + "sp-io/std", + "sp-runtime/std", + "sp-std/std", +] diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..1c72a8be68653974803ee7702ae3aa53accdd1b5 --- /dev/null +++ b/frame/example-offchain-worker/src/lib.rs @@ -0,0 +1,548 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! # Offchain Worker Example Module +//! +//! The Offchain Worker Example: A simple pallet demonstrating +//! concepts, APIs and structures common to most offchain workers. +//! +//! Run `cargo doc --package pallet-example-offchain-worker --open` to view this module's +//! documentation. +//! +//! - \[`pallet_example_offchain_worker::Trait`](./trait.Trait.html) +//! - \[`Call`](./enum.Call.html) +//! - \[`Module`](./struct.Module.html) +//! +//! +//! \## Overview +//! +//! In this example we are going to build a very simplistic, naive and definitely NOT +//! production-ready oracle for BTC/USD price. +//! Offchain Worker (OCW) will be triggered after every block, fetch the current price +//! and prepare either signed or unsigned transaction to feed the result back on chain. +//! The on-chain logic will simply aggregate the results and store last `64` values to compute +//! the average price. +//! Additional logic in OCW is put in place to prevent spamming the network with both signed +//! 
and unsigned transactions, and custom `UnsignedValidator` makes sure that there is only +//! one unsigned transaction floating in the network. +#![cfg_attr(not(feature = "std"), no_std)] + +use frame_support::{ + debug, + dispatch::DispatchResult, decl_module, decl_storage, decl_event, + traits::Get, + weights::SimpleDispatchInfo, +}; +use frame_system::{self as system, ensure_signed, ensure_none, offchain}; +use serde_json as json; +use sp_core::crypto::KeyTypeId; +use sp_runtime::{ + offchain::{http, Duration, storage::StorageValueRef}, + traits::Zero, + transaction_validity::{InvalidTransaction, ValidTransaction, TransactionValidity}, +}; + +#[cfg(test)] +mod tests; + +/// Defines application identifier for crypto keys of this module. +/// +/// Every module that deals with signatures needs to declare its unique identifier for +/// its crypto keys. +/// When an offchain worker is signing transactions it's going to request keys of type +/// `KeyTypeId` from the keystore and use the ones it finds to sign the transaction. +/// The keys can be inserted manually via RPC (see `author_insertKey`). +pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"btc!"); + +/// Based on the above `KeyTypeId` we need to generate pallet-specific crypto type wrappers. +/// We can use any of the supported crypto kinds (`sr25519`, `ed25519` and `ecdsa`) and augment +/// the types with this pallet-specific identifier. +pub mod crypto { + use super::KEY_TYPE; + use sp_runtime::app_crypto::{app_crypto, sr25519}; + app_crypto!(sr25519, KEY_TYPE); +} + +/// This pallet's configuration trait. +pub trait Trait: frame_system::Trait { + /// The type to sign and submit transactions. + type SubmitSignedTransaction: + offchain::SubmitSignedTransaction::Call>; + /// The type to submit unsigned transactions. + type SubmitUnsignedTransaction: + offchain::SubmitUnsignedTransaction::Call>; + + /// The overarching event type. + type Event: From> + Into<::Event>; + /// The overarching dispatch call type. + type Call: From>; + + // Configuration parameters + + /// A grace period after we send a transaction. + /// + /// To avoid sending too many transactions, we only attempt to send one + /// every `GRACE_PERIOD` blocks. We use Local Storage to coordinate + /// sending between distinct runs of this offchain worker. + type GracePeriod: Get; + + /// Number of blocks of cooldown after an unsigned transaction is included. + /// + /// This ensures that we only accept unsigned transactions once, every `UnsignedInterval` blocks. + type UnsignedInterval: Get; +} + +decl_storage! { + trait Store for Module as Example { + /// A vector of recently submitted prices. + /// + /// This is used to calculate the average price; it should have a bounded size. + Prices get(fn prices): Vec; + /// Defines the block when the next unsigned transaction will be accepted. + /// + /// To prevent spam of unsigned (and unpaid!) transactions on the network, + /// we only allow one transaction every `T::UnsignedInterval` blocks. + /// This storage entry defines when a new transaction is going to be accepted. + NextUnsignedAt get(fn next_unsigned_at): T::BlockNumber; + } +} + +decl_event!( + /// Events generated by the module. + pub enum Event where AccountId = ::AccountId { + /// Event generated when a new price is accepted to contribute to the average. + NewPrice(u32, AccountId), + } +); + +decl_module! { + /// A public part of the pallet. + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + + /// Submit a new price to the list.
+ /// + /// This method is a public function of the module and can be called from within + /// a transaction. It appends the given `price` to the current list of prices. + /// In our example the `offchain worker` will create, sign & submit a transaction that + /// calls this function passing the price. + /// + /// The transaction needs to be signed (see the `ensure_signed` check), so that the caller + /// pays a fee to execute it. + /// This makes sure that it's not easy (or rather cheap) to attack the chain by submitting + /// excessive transactions, but note that it doesn't ensure the price oracle is actually + /// working and receives (and provides) meaningful data. + /// This example is not focused on correctness of the oracle itself, but rather its + /// purpose is to showcase offchain worker capabilities. + #[weight = SimpleDispatchInfo::FixedNormal(10_000)] + pub fn submit_price(origin, price: u32) -> DispatchResult { + // Retrieve sender of the transaction. + let who = ensure_signed(origin)?; + // Add the price to the on-chain list. + Self::add_price(who, price); + Ok(()) + } + + /// Submit a new price to the list via an unsigned transaction. + /// + /// Works exactly like the `submit_price` function, but since we allow sending the + /// transaction without a signature, and hence without paying any fees, + /// we need a way to make sure that only some transactions are accepted. + /// This function can be called only once every `T::UnsignedInterval` blocks. + /// Transactions that call that function are de-duplicated on the pool level + /// via the `validate_unsigned` implementation and also are rendered invalid if + /// the function has already been called in the current "session". + /// + /// It's important to specify `weight` for unsigned calls as well, because even though + /// they don't charge fees, we still don't want a single block to contain an unlimited + /// number of such transactions. + /// + /// This example is not focused on correctness of the oracle itself, but rather its + /// purpose is to showcase offchain worker capabilities. + #[weight = SimpleDispatchInfo::FixedNormal(10_000)] + pub fn submit_price_unsigned(origin, _block_number: T::BlockNumber, price: u32) + -> DispatchResult + { + // This ensures that the function can only be called via an unsigned transaction. + ensure_none(origin)?; + // Add the price to the on-chain list, but mark it as coming from an empty address. + Self::add_price(Default::default(), price); + // Now increment the block number at which we expect the next unsigned transaction. + let current_block = >::block_number(); + >::put(current_block + T::UnsignedInterval::get()); + Ok(()) + } + + /// Offchain Worker entry point. + /// + /// By implementing `fn offchain_worker` within `decl_module!` you declare a new offchain + /// worker. + /// This function will be called when the node is fully synced and a new best block is + /// successfully imported. + /// Note that it's not guaranteed for offchain workers to run on EVERY block; there might + /// be cases where some blocks are skipped, or for some the worker runs twice (re-orgs), + /// so the code should be able to handle that. + /// You can use the `Local Storage` API to coordinate runs of the worker. + fn offchain_worker(block_number: T::BlockNumber) { + // It's a good idea to add logs to your offchain workers. + // Using the `frame_support::debug` module you have access to the same API exposed by + // the `log` crate. + // Note that having logs compiled to WASM may cause the size of the blob to increase + // significantly.
You can use the `RuntimeDebug` custom derive to hide details of the types + // in WASM or use the `debug::native` namespace to produce logs only when the worker is + // running natively. + debug::native::info!("Hello World from offchain workers!"); + + // Since off-chain workers are just part of the runtime code, they have direct access + // to the storage and other included pallets. + // + // We can easily import `frame_system` and retrieve a block hash of the parent block. + let parent_hash = >::block_hash(block_number - 1.into()); + debug::debug!("Current block: {:?} (parent hash: {:?})", block_number, parent_hash); + + // It's good practice to keep the `fn offchain_worker()` function minimal and move most + // of the code to a separate `impl` block. + // Here we call a helper function to calculate the current average price. + // This function reads storage entries of the current state. + let average: Option = Self::average_price(); + debug::debug!("Current price: {:?}", average); + + // For this example we are going to send both signed and unsigned transactions + // depending on the block number. + // Usually it's enough to choose one or the other. + let should_send = Self::choose_transaction_type(block_number); + let res = match should_send { + TransactionType::Signed => Self::fetch_price_and_send_signed(), + TransactionType::Unsigned => Self::fetch_price_and_send_unsigned(block_number), + TransactionType::None => Ok(()), + }; + if let Err(e) = res { + debug::error!("Error: {}", e); + } + } + } +} + +enum TransactionType { + Signed, + Unsigned, + None, +} + +/// Most of the functions are moved outside of the `decl_module!` macro. +/// +/// This greatly helps with error messages, as the ones inside the macro +/// can sometimes be hard to debug. +impl Module { + /// Chooses which transaction type to send. + /// + /// This function serves mostly to showcase the `StorageValue` helper + /// and local storage usage. + /// + /// Returns the type of transaction that should be produced in the current run. + fn choose_transaction_type(block_number: T::BlockNumber) -> TransactionType { + /// A friendlier name for the error that is going to be returned in case we are in the grace + /// period. + const RECENTLY_SENT: () = (); + + // Start off by creating a reference to the Local Storage value. + // Since the local storage is common for all offchain workers, it's a good practice + // to prepend your entry with the module name. + let val = StorageValueRef::persistent(b"example_ocw::last_send"); + // The Local Storage is persisted and shared between runs of the offchain workers, + // and offchain workers may run concurrently. We can use the `mutate` function to + // write a storage entry in an atomic fashion. Under the hood it uses the `compare_and_set` + // low-level method of the local storage API, which means that only one worker + // will be able to "acquire a lock" and send a transaction if multiple workers + // happen to be executed concurrently. + let res = val.mutate(|last_send: Option>| { + // We match on the value decoded from the storage. The first `Option` + // indicates if the value was present in the storage at all, + // the second (inner) `Option` indicates if the value was successfully + // decoded to the expected type (`T::BlockNumber` in our case). + match last_send { + // If we already have a value in storage and the block number is recent enough + // we avoid sending another transaction at this time.
+ Some(Some(block)) if block + T::GracePeriod::get() < block_number => { + Err(RECENTLY_SENT) + }, + // In every other case we attempt to acquire the lock and send a transaction. + _ => Ok(block_number) + } + }); + + // The result of `mutate` call will give us a nested `Result` type. + // The first one matches the return of the closure passed to `mutate`, i.e. + // if we return `Err` from the closure, we get an `Err` here. + // In case we return `Ok`, here we will have another (inner) `Result` that indicates + // if the value has been set to the storage correctly - i.e. if it wasn't + // written to in the meantime. + match res { + // The value has been set correctly, which means we can safely send a transaction now. + Ok(Ok(block_number)) => { + // Depending if the block is even or odd we will send a `Signed` or `Unsigned` + // transaction. + // Note that this logic doesn't really guarantee that the transactions will be sent + // in an alternating fashion (i.e. fairly distributed). Depending on the execution + // order and lock acquisition, we may end up for instance sending two `Signed` + // transactions in a row. If a strict order is desired, it's better to use + // the storage entry for that. (for instance store both block number and a flag + // indicating the type of next transaction to send). + let send_signed = block_number % 2.into() == Zero::zero(); + if send_signed { + TransactionType::Signed + } else { + TransactionType::Unsigned + } + }, + // We are in the grace period, we should not send a transaction this time. + Err(RECENTLY_SENT) => TransactionType::None, + // We wanted to send a transaction, but failed to write the block number (acquire a + // lock). This indicates that another offchain worker that was running concurrently + // most likely executed the same logic and succeeded at writing to storage. + // Thus we don't really want to send the transaction, knowing that the other run + // already did. + Ok(Err(_)) => TransactionType::None, + } + } + + /// A helper function to fetch the price and send signed transaction. + fn fetch_price_and_send_signed() -> Result<(), String> { + use system::offchain::SubmitSignedTransaction; + // Firstly we check if there are any accounts in the local keystore that are capable of + // signing the transaction. + // If not it doesn't even make sense to make external HTTP requests, since we won't be able + // to put the results back on-chain. + if !T::SubmitSignedTransaction::can_sign() { + return Err( + "No local accounts available. Consider adding one via `author_insertKey` RPC." + )? + } + + // Make an external HTTP request to fetch the current price. + // Note this call will block until response is received. + let price = Self::fetch_price().map_err(|e| format!("{:?}", e))?; + + // Received price is wrapped into a call to `submit_price` public function of this pallet. + // This means that the transaction, when executed, will simply call that function passing + // `price` as an argument. + let call = Call::submit_price(price); + + // Using `SubmitSignedTransaction` associated type we create and submit a transaction + // representing the call, we've just created. + // Submit signed will return a vector of results for all accounts that were found in the + // local keystore with expected `KEY_TYPE`. 
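+ // (Each element of `results` pairs one of those local accounts with the outcome of submitting its transaction, which is what the loop below logs per account.)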
+ let results = T::SubmitSignedTransaction::submit_signed(call); + for (acc, res) in &results { + match res { + Ok(()) => debug::info!("[{:?}] Submitted price of {} cents", acc, price), + Err(e) => debug::error!("[{:?}] Failed to submit transaction: {:?}", acc, e), + } + } + + Ok(()) + } + + /// A helper function to fetch the price and send an unsigned transaction. + fn fetch_price_and_send_unsigned(block_number: T::BlockNumber) -> Result<(), String> { + use system::offchain::SubmitUnsignedTransaction; + // Make sure we don't fetch the price if the unsigned transaction is going to be rejected + // anyway. + let next_unsigned_at = >::get(); + if next_unsigned_at > block_number { + return Err( + format!("Too early to send unsigned transaction. Next at: {:?}", next_unsigned_at) + )? + } + + // Make an external HTTP request to fetch the current price. + // Note this call will block until response is received. + let price = Self::fetch_price().map_err(|e| format!("{:?}", e))?; + + // Received price is wrapped into a call to `submit_price_unsigned` public function of this + // pallet. This means that the transaction, when executed, will simply call that function + // passing `price` as an argument. + let call = Call::submit_price_unsigned(block_number, price); + + // Now let's create an unsigned transaction out of this call and submit it to the pool. + // By default unsigned transactions are disallowed, so we need to whitelist this case + // by writing `UnsignedValidator`. Note that it's EXTREMELY important to carefully + // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam + // attack vectors. See validation logic docs for more details. + T::SubmitUnsignedTransaction::submit_unsigned(call) + .map_err(|()| "Unable to submit unsigned transaction.".into()) + + } + + /// Fetch the current price and return the result in cents. + fn fetch_price() -> Result { + // We want to keep the offchain worker execution time reasonable, so we set a hard-coded + // deadline to 2s to complete the external call. + // You can also wait indefinitely for the response; however, you may still get a timeout + // coming from the host machine. + let deadline = sp_io::offchain::timestamp().add(Duration::from_millis(2_000)); + // Initiate an external HTTP GET request. + // This is using high-level wrappers from `sp_runtime`, for the low-level calls that + // you can find in `sp_io`. The API is trying to be similar to `reqwest`, but + // since we are running in a custom WASM execution environment we can't simply + // import the library here. + let request = http::Request::get( + "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD" + ); + // We set the deadline for sending the request; note that awaiting the response can + // have a separate deadline. Next we send the request; before that it's also possible + // to alter request headers or stream body content in case of non-GET requests. + let pending = request + .deadline(deadline) + .send() + .map_err(|_| http::Error::IoError)?; + + // The request is already being processed by the host, we are free to do anything + // else in the worker (we can send multiple concurrent requests too). + // At some point, however, we probably want to check the response, + // so we can block the current thread and wait for it to finish. + // Note that since the request is being driven by the host, we don't have to wait + // for the request to complete; we can simply not read the response.
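+ // As a rough illustration of firing several requests before waiting on any of them (the second URL is a made-up placeholder, not an endpoint this example uses):
+ // let pending_a = http::Request::get("https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD").deadline(deadline).send().map_err(|_| http::Error::IoError)?;
+ // let pending_b = http::Request::get("https://example.com/other-feed").deadline(deadline).send().map_err(|_| http::Error::IoError)?;
+ // let _first = pending_a.try_wait(deadline);
+ // let _second = pending_b.try_wait(deadline);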
+ let response = pending.try_wait(deadline) + .map_err(|_| http::Error::DeadlineReached)??; + // Let's check the status code before we proceed to reading the response. + if response.code != 200 { + debug::warn!("Unexpected status code: {}", response.code); + return Err(http::Error::Unknown); + } + + // Next we want to fully read the response body and collect it to a vector of bytes. + // Note that the return object allows you to read the body in chunks as well + // with a way to control the deadline. + let body = response.body().collect::>(); + // Next we parse the response using `serde_json`. Even though it's possible to use + // `serde_derive` and deserialize to a struct it's not recommended due to blob size + // overhead introduced by such code. Deserializing to `json::Value` is much more + // lightweight and should be preferred, especially if we only care about a small number + // of properties from the response. + let val: Result = json::from_slice(&body); + // Let's parse the price as float value. Note that you should avoid using floats in the + // runtime, it's fine to do that in the offchain worker, but we do convert it to an integer + // before submitting on-chain. + let price = val.ok().and_then(|v| v.get("USD").and_then(|v| v.as_f64())); + let price = match price { + Some(pricef) => Ok((pricef * 100.) as u32), + None => { + let s = core::str::from_utf8(&body); + debug::warn!("Unable to extract price from the response: {:?}", s); + Err(http::Error::Unknown) + } + }?; + + debug::warn!("Got price: {} cents", price); + + Ok(price) + } + + /// Add new price to the list. + fn add_price(who: T::AccountId, price: u32) { + debug::info!("Adding to the average: {}", price); + Prices::mutate(|prices| { + const MAX_LEN: usize = 64; + + if prices.len() < MAX_LEN { + prices.push(price); + } else { + prices[price as usize % MAX_LEN] = price; + } + }); + + let average = Self::average_price() + .expect("The average is not empty, because it was just mutated; qed"); + debug::info!("Current average price is: {}", average); + // here we are raising the NewPrice event + Self::deposit_event(RawEvent::NewPrice(price, who)); + } + + /// Calculate current average price. + fn average_price() -> Option { + let prices = Prices::get(); + if prices.is_empty() { + None + } else { + Some(prices.iter().fold(0_u32, |a, b| a.saturating_add(*b)) / prices.len() as u32) + } + } +} + +#[allow(deprecated)] // ValidateUnsigned +impl frame_support::unsigned::ValidateUnsigned for Module { + type Call = Call; + + /// Validate unsigned call to this module. + /// + /// By default unsigned transactions are disallowed, but implementing the validator + /// here we make sure that some particular calls (the ones produced by offchain worker) + /// are being whitelisted and marked as valid. + fn validate_unsigned(call: &Self::Call) -> TransactionValidity { + // Firstly let's check that we call the right function. + if let Call::submit_price_unsigned(block_number, new_price) = call { + // Now let's check if the transaction has any chance to succeed. + let next_unsigned_at = >::get(); + if &next_unsigned_at > block_number { + return InvalidTransaction::Stale.into(); + } + // Let's make sure to reject transactions from the future. + let current_block = >::block_number(); + if ¤t_block < block_number { + return InvalidTransaction::Future.into(); + } + + // We prioritize transactions that are more far away from current average. 
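+ // (For instance, with a stored average of 15_000 cents and an incoming price of 15_522 cents, `avg_price` below is 522 and the resulting priority is (1 << 20) + 522 = 1_049_098.)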
+ // + // Note this doesn't make much sense when building an actual oracle, but this example + // is here mostly to show off offchain workers capabilities, not about building an + // oracle. + let avg_price = Self::average_price() + .map(|price| if &price > new_price { price - new_price } else { new_price - price }) + .unwrap_or(0); + + Ok(ValidTransaction { + // We set base priority to 2**20 to make sure it's included before any other + // transactions in the pool. Next we tweak the priority depending on how much + // it differs from the current average. (the more it differs the more priority it + // has). + priority: (1 << 20) + avg_price as u64, + // This transaction does not require anything else to go before into the pool. + // In theory we could require `previous_unsigned_at` transaction to go first, + // but it's not necessary in our case. + requires: vec![], + // We set the `provides` tag to be the same as `next_unsigned_at`. This makes + // sure only one transaction produced after `next_unsigned_at` will ever + // get to the transaction pool and will end up in the block. + // We can still have multiple transactions compete for the same "spot", + // and the one with higher priority will replace other one in the pool. + provides: vec![codec::Encode::encode(&(KEY_TYPE.0, next_unsigned_at))], + // The transaction is only valid for next 5 blocks. After that it's + // going to be revalidated by the pool. + longevity: 5, + // It's fine to propagate that transaction to other peers, which means it can be + // created even by nodes that don't produce blocks. + // Note that sometimes it's better to keep it for yourself (if you are the block + // producer), since for instance in some schemes others may copy your solution and + // claim a reward. + propagate: true, + }) + } else { + InvalidTransaction::Call.into() + } + } +} diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..9b6a567a178403d9cb1b717705fa520820358bef --- /dev/null +++ b/frame/example-offchain-worker/src/tests.rs @@ -0,0 +1,210 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +use crate::*; + +use codec::Decode; +use frame_support::{ + assert_ok, impl_outer_origin, parameter_types, + weights::{GetDispatchInfo, Weight}, +}; +use sp_core::{ + H256, + offchain::{OffchainExt, TransactionPoolExt, testing}, + testing::KeyStore, + traits::KeystoreExt, +}; +use sp_runtime::{ + Perbill, RuntimeAppPublic, + testing::{Header, TestXt}, + traits::{BlakeTwo256, IdentityLookup, Extrinsic as ExtrinsicsT}, +}; + +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} +} + +// For testing the module, we construct most of a mock runtime. 
This means +// first constructing a configuration type (`Test`) which `impl`s each of the +// configuration traits of modules we want to use. +#[derive(Clone, Eq, PartialEq)] +pub struct Test; +parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); +} +impl frame_system::Trait for Test { + type Origin = Origin; + type Call = (); + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = sp_core::sr25519::Public; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); +} + +type Extrinsic = TestXt, ()>; +type SubmitTransaction = frame_system::offchain::TransactionSubmitter< + crypto::Public, + Test, + Extrinsic +>; + +impl frame_system::offchain::CreateTransaction for Test { + type Public = sp_core::sr25519::Public; + type Signature = sp_core::sr25519::Signature; + + fn create_transaction>( + call: ::Call, + _public: Self::Public, + _account: ::AccountId, + nonce: ::Index, + ) -> Option<(::Call, ::SignaturePayload)> { + Some((call, (nonce, ()))) + } +} + +parameter_types! { + pub const GracePeriod: u64 = 5; + pub const UnsignedInterval: u64 = 128; +} + +impl Trait for Test { + type Event = (); + type Call = Call; + type SubmitSignedTransaction = SubmitTransaction; + type SubmitUnsignedTransaction = SubmitTransaction; + type GracePeriod = GracePeriod; + type UnsignedInterval = UnsignedInterval; +} + +type Example = Module; + +#[test] +fn it_aggregates_the_price() { + sp_io::TestExternalities::default().execute_with(|| { + assert_eq!(Example::average_price(), None); + + assert_ok!(Example::submit_price(Origin::signed(Default::default()), 27)); + assert_eq!(Example::average_price(), Some(27)); + + assert_ok!(Example::submit_price(Origin::signed(Default::default()), 43)); + assert_eq!(Example::average_price(), Some(35)); + }); +} + +#[test] +fn should_make_http_call_and_parse_result() { + let (offchain, state) = testing::TestOffchainExt::new(); + let mut t = sp_io::TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + + price_oracle_response(&mut state.write()); + + t.execute_with(|| { + // when + let price = Example::fetch_price().unwrap(); + // then + assert_eq!(price, 15522); + }); +} + +#[test] +fn should_submit_signed_transaction_on_chain() { + const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; + + let (offchain, offchain_state) = testing::TestOffchainExt::new(); + let (pool, pool_state) = testing::TestTransactionPoolExt::new(); + let keystore = KeyStore::new(); + keystore.write().sr25519_generate_new( + crate::crypto::Public::ID, + Some(&format!("{}/hunter1", PHRASE)) + ).unwrap(); + + + let mut t = sp_io::TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + t.register_extension(TransactionPoolExt::new(pool)); + t.register_extension(KeystoreExt(keystore)); + + price_oracle_response(&mut offchain_state.write()); + + t.execute_with(|| { + // when + Example::fetch_price_and_send_signed().unwrap(); + // then + let tx = 
pool_state.write().transactions.pop().unwrap(); + assert!(pool_state.read().transactions.is_empty()); + let tx = Extrinsic::decode(&mut &*tx).unwrap(); + assert_eq!(tx.signature.unwrap().0, 0); + assert_eq!(tx.call, Call::submit_price(15522)); + }); +} + +#[test] +fn should_submit_unsigned_transaction_on_chain() { + let (offchain, offchain_state) = testing::TestOffchainExt::new(); + let (pool, pool_state) = testing::TestTransactionPoolExt::new(); + let mut t = sp_io::TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + t.register_extension(TransactionPoolExt::new(pool)); + + price_oracle_response(&mut offchain_state.write()); + + t.execute_with(|| { + // when + Example::fetch_price_and_send_unsigned(1).unwrap(); + // then + let tx = pool_state.write().transactions.pop().unwrap(); + assert!(pool_state.read().transactions.is_empty()); + let tx = Extrinsic::decode(&mut &*tx).unwrap(); + assert_eq!(tx.signature, None); + assert_eq!(tx.call, Call::submit_price_unsigned(1, 15522)); + }); +} + +#[test] +fn weights_work() { + // must have a default weight. + let default_call = >::submit_price(10); + let info = default_call.get_dispatch_info(); + // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` + assert_eq!(info.weight, 10_000); +} + +fn price_oracle_response(state: &mut testing::OffchainState) { + state.expect_request(0, testing::PendingRequest { + method: "GET".into(), + uri: "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD".into(), + response: Some(br#"{"USD": 155.23}"#.to_vec()), + sent: true, + ..Default::default() + }); +} diff --git a/frame/example/Cargo.toml b/frame/example/Cargo.toml index 0d37940fc73170feaae1eace009531ea9fb692d7..515d1fd8d7c7a10b805b43cba89793f7f1fb67af 100644 --- a/frame/example/Cargo.toml +++ b/frame/example/Cargo.toml @@ -1,22 +1,26 @@ [package] name = "pallet-example" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" -license = "GPL-3.0" +license = "Unlicense" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME example pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../benchmarking" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../balances" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = 
"2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } [features] default = ["std"] @@ -24,6 +28,7 @@ std = [ "serde", "codec/std", "sp-runtime/std", + "frame-benchmarking/std", "frame-support/std", "frame-system/std", "pallet-balances/std", diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 39f0d25d3232a94f8fe4cec77ac59a78378f720f..826538ae582fe7e7b3073408cd1fd4042eccdf9c 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -258,7 +258,9 @@ use frame_support::{ dispatch::DispatchResult, decl_module, decl_storage, decl_event, weights::{SimpleDispatchInfo, DispatchInfo, DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee}, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use sp_std::prelude::*; +use frame_benchmarking::{benchmarks, account}; +use frame_system::{self as system, ensure_signed, ensure_root, RawOrigin}; use codec::{Encode, Decode}; use sp_runtime::{ traits::{SignedExtension, Bounded, SaturatedConversion}, @@ -642,6 +644,42 @@ impl SignedExtension for WatchDummy { } } +benchmarks!{ + _ { + // Define a common range for `b`. + let b in 1 .. 1000 => (); + } + + // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. + accumulate_dummy { + let b in ...; + let caller = account("caller", 0, 0); + }: _ (RawOrigin::Signed(caller), b.into()) + + // This will measure the execution time of `set_dummy` for b in [1..1000] range. + set_dummy { + let b in ...; + let caller = account("caller", 0, 0); + }: set_dummy (RawOrigin::Signed(caller), b.into()) + + // This will measure the execution time of `set_dummy` for b in [1..10] range. + another_set_dummy { + let b in 1 .. 10; + let caller = account("caller", 0, 0); + }: set_dummy (RawOrigin::Signed(caller), b.into()) + + // This will measure the execution time of sorting a vector. + sort_vector { + let x in 0 .. 10000; + let mut m = Vec::::new(); + for i in 0..x { + m.push(i); + } + }: { + m.sort(); + } +} + #[cfg(test)] mod tests { use super::*; @@ -690,7 +728,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = pallet_balances::Module; + type OnKilledAccount = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; diff --git a/frame/executive/Cargo.toml b/frame/executive/Cargo.toml index 514fb68d6dbc06b64f849ebe8ef35731b2a22e65..deafb0cadd5d560c321eec62f3855b0e2fbc4aa4 100644 --- a/frame/executive/Cargo.toml +++ b/frame/executive/Cargo.toml @@ -1,25 +1,28 @@ [package] name = "frame-executive" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME executives engine" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } serde = { version = "1.0.101", optional = true } -sp-io ={ path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } [dev-dependencies] hex-literal = "0.2.1" -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-indices = { version = "2.0.0", path = "../indices" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-transaction-payment = { version = "2.0.0", path = "../transaction-payment" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-indices = { version = "2.0.0-alpha.2", path = "../indices" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +pallet-transaction-payment = { version = "2.0.0-alpha.2", path = "../transaction-payment" } [features] default = ["std"] diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 792643919d7f6bb69e53e81731e95db056beeb15..7bf39989ecc728a7c7ddf5cacc98c7a9077e1c4d 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -60,9 +60,7 @@ //! # pub type AllModules = u64; //! # pub enum Runtime {}; //! # use sp_runtime::transaction_validity::{TransactionValidity, UnknownTransaction}; -//! # #[allow(deprecated)] //! # use sp_runtime::traits::ValidateUnsigned; -//! # #[allow(deprecated)] //! # impl ValidateUnsigned for Runtime { //! # type Call = (); //! 
# @@ -77,18 +75,20 @@ #![cfg_attr(not(feature = "std"), no_std)] use sp_std::{prelude::*, marker::PhantomData}; -use frame_support::weights::{GetDispatchInfo, WeighBlock, DispatchInfo}; +use frame_support::{ + storage::StorageValue, + weights::{GetDispatchInfo, WeighBlock, DispatchInfo} +}; use sp_runtime::{ generic::Digest, ApplyExtrinsicResult, traits::{ self, Header, Zero, One, Checkable, Applyable, CheckEqual, OnFinalize, OnInitialize, - NumberFor, Block as BlockT, OffchainWorker, Dispatchable, Saturating, + NumberFor, Block as BlockT, OffchainWorker, Dispatchable, Saturating, OnRuntimeUpgrade, }, transaction_validity::TransactionValidity, }; use sp_runtime::generic::CheckSignature; -#[allow(deprecated)] use sp_runtime::traits::ValidateUnsigned; use codec::{Codec, Encode}; use frame_system::{extrinsics_root, DigestOf}; @@ -107,13 +107,13 @@ pub struct Executive( PhantomData<(System, Block, Context, UnsignedValidator, AllModules)> ); -#[allow(deprecated)] // Allow ValidateUnsigned, remove the attribute when the trait is removed. impl< System: frame_system::Trait, Block: traits::Block, Context: Default, UnsignedValidator, AllModules: + OnRuntimeUpgrade + OnInitialize + OnFinalize + OffchainWorker + @@ -122,7 +122,7 @@ impl< where Block::Extrinsic: Checkable + Codec, CheckedOf: - Applyable + + Applyable + GetDispatchInfo, CallOf: Dispatchable, OriginOf: From>, @@ -133,13 +133,13 @@ where } } -#[allow(deprecated)] // Allow ValidateUnsigned, remove the attribute when the trait is removed. impl< System: frame_system::Trait, Block: traits::Block, Context: Default, UnsignedValidator, AllModules: + OnRuntimeUpgrade + OnInitialize + OnFinalize + OffchainWorker + @@ -148,7 +148,7 @@ impl< where Block::Extrinsic: Checkable + Codec, CheckedOf: - Applyable + + Applyable + GetDispatchInfo, CallOf: Dispatchable, OriginOf: From>, @@ -181,6 +181,12 @@ where extrinsics_root: &System::Hash, digest: &Digest, ) { + if frame_system::RuntimeUpgraded::take() { + ::on_runtime_upgrade(); + >::register_extra_weight_unchecked( + >::on_runtime_upgrade() + ); + } >::initialize( block_number, parent_hash, @@ -357,6 +363,11 @@ where &digests, frame_system::InitKind::Inspection, ); + + // Initialize logger, so the log messages are visible + // also when running WASM. + frame_support::debug::RuntimeLogger::init(); + >::offchain_worker( // to maintain backward compatibility we call module offchain workers // with parent block number. @@ -469,7 +480,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! 
{ pub const ExistentialDeposit: u64 = 1; @@ -496,7 +507,6 @@ mod tests { } impl custom::Trait for Runtime {} - #[allow(deprecated)] impl ValidateUnsigned for Runtime { type Call = Call; @@ -541,7 +551,7 @@ mod tests { pallet_balances::GenesisConfig:: { balances: vec![(1, 211)], }.assimilate_storage(&mut t).unwrap(); - let xt = sp_runtime::testing::TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(2, 69))); + let xt = TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(2, 69))); let weight = xt.get_dispatch_info().weight as u64; let mut t = sp_io::TestExternalities::new(t); t.execute_with(|| { @@ -574,7 +584,7 @@ mod tests { header: Header { parent_hash: [69u8; 32].into(), number: 1, - state_root: hex!("96797237079b6d6ffab7a47f90ee257a439a0e8268bdab3fe2f1e52572b101de").into(), + state_root: hex!("8a22606e925c39bb0c8e8f6f5871c0aceab88a2fcff6b2d92660af8f6daff0b1").into(), extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), digest: Digest { logs: vec![], }, }, @@ -621,7 +631,7 @@ mod tests { fn bad_extrinsic_not_inserted() { let mut t = new_test_ext(1); // bad nonce check! - let xt = sp_runtime::testing::TestXt::new_signed(sign_extra(1, 30, 0), Call::Balances(BalancesCall::transfer(33, 69))); + let xt = TestXt::new_signed(sign_extra(1, 30, 0), Call::Balances(BalancesCall::transfer(33, 69))); t.execute_with(|| { Executive::initialize_block(&Header::new( 1, @@ -639,7 +649,7 @@ mod tests { fn block_weight_limit_enforced() { let mut t = new_test_ext(10000); // given: TestXt uses the encoded len as fixed Len: - let xt = sp_runtime::testing::TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(33, 0))); + let xt = TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(33, 0))); let encoded = xt.encode(); let encoded_len = encoded.len() as Weight; let limit = AvailableBlockRatio::get() * MaximumBlockWeight::get() - 175; @@ -656,7 +666,7 @@ mod tests { assert_eq!(>::all_extrinsics_weight(), 175); for nonce in 0..=num_to_exhaust_block { - let xt = sp_runtime::testing::TestXt::new_signed( + let xt = TestXt::new_signed( sign_extra(1, nonce.into(), 0), Call::Balances(BalancesCall::transfer(33, 0)), ); let res = Executive::apply_extrinsic(xt); @@ -676,14 +686,14 @@ mod tests { #[test] fn block_weight_and_size_is_stored_per_tx() { - let xt = sp_runtime::testing::TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(33, 0))); - let x1 = sp_runtime::testing::TestXt::new_signed(sign_extra(1, 1, 0), Call::Balances(BalancesCall::transfer(33, 0))); - let x2 = sp_runtime::testing::TestXt::new_signed(sign_extra(1, 2, 0), Call::Balances(BalancesCall::transfer(33, 0))); + let xt = TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(33, 0))); + let x1 = TestXt::new_signed(sign_extra(1, 1, 0), Call::Balances(BalancesCall::transfer(33, 0))); + let x2 = TestXt::new_signed(sign_extra(1, 2, 0), Call::Balances(BalancesCall::transfer(33, 0))); let len = xt.clone().encode().len() as u32; let mut t = new_test_ext(1); t.execute_with(|| { assert_eq!(>::all_extrinsics_weight(), 0); - assert_eq!(>::all_extrinsics_weight(), 0); + assert_eq!(>::all_extrinsics_len(), 0); assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); @@ -696,13 +706,13 @@ mod tests { let _ = >::finalize(); assert_eq!(>::all_extrinsics_weight(), 0); - assert_eq!(>::all_extrinsics_weight(), 
0); + assert_eq!(>::all_extrinsics_len(), 0); }); } #[test] fn validate_unsigned() { - let xt = sp_runtime::testing::TestXt::new_unsigned(Call::Balances(BalancesCall::set_balance(33, 69, 69))); + let xt = TestXt::new_unsigned(Call::Balances(BalancesCall::set_balance(33, 69, 69))); let mut t = new_test_ext(1); t.execute_with(|| { @@ -711,9 +721,26 @@ mod tests { }); } + #[test] + fn unsigned_weight_is_noted_when_applied() { + let xt = TestXt::new_unsigned(Call::Balances(BalancesCall::set_balance(33, 69, 69))); + let len = xt.clone().encode().len() as u32; + let mut t = new_test_ext(1); + t.execute_with(|| { + assert_eq!(>::all_extrinsics_weight(), 0); + assert_eq!(>::all_extrinsics_len(), 0); + + // This is okay -- balances transfer will panic since it requires ensure_signed. + assert_eq!(Executive::apply_extrinsic(xt), Ok(Err(DispatchError::BadOrigin))); + + assert_eq!(>::all_extrinsics_weight(), len); + assert_eq!(>::all_extrinsics_len(), len); + }); + } + #[test] fn apply_trusted_skips_signature_check_but_not_others() { - let xt1 = sp_runtime::testing::TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(33, 0))) + let xt1 = TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(33, 0))) .badly_signed(); let mut t = new_test_ext(1); @@ -722,7 +749,7 @@ mod tests { assert_eq!(Executive::apply_trusted_extrinsic(xt1), Ok(Ok(()))); }); - let xt2 = sp_runtime::testing::TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(33, 0))) + let xt2 = TestXt::new_signed(sign_extra(1, 0, 0), Call::Balances(BalancesCall::transfer(33, 0))) .invalid(TransactionValidityError::Invalid(InvalidTransaction::Call)); t.execute_with(|| { @@ -745,7 +772,7 @@ mod tests { 110, lock, ); - let xt = sp_runtime::testing::TestXt::new_signed( + let xt = TestXt::new_signed( sign_extra(1, 0, 0), Call::System(SystemCall::remark(vec![1u8])), ); diff --git a/frame/finality-tracker/Cargo.toml b/frame/finality-tracker/Cargo.toml index 7c87df4e54811f2d4dfe2a25f8e14d3409d225d3..1313080dfe0044ce3d0ee35ad7fcf895c071b4ab 100644 --- a/frame/finality-tracker/Cargo.toml +++ b/frame/finality-tracker/Cargo.toml @@ -1,24 +1,29 @@ [package] name = "pallet-finality-tracker" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME Pallet that tracks the last finalized block, as perceived by block authors." 
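The frame/executive hunks above drain `frame_system::RuntimeUpgraded` once per upgrade and, before `initialize`, invoke `OnRuntimeUpgrade` on all modules and register the reported weight. Below is a minimal sketch of how a pallet might hook into this, assuming `decl_module!` accepts an `on_runtime_upgrade` entry (as the new `AllModules: OnRuntimeUpgrade` bound suggests); the `PalletVersion` storage item and the migration body are hypothetical, not taken from this diff.

```rust
// Illustrative sketch only; storage name and migration logic are invented.
use frame_support::{decl_module, decl_storage, storage::StorageValue};

pub trait Trait: frame_system::Trait {}

decl_storage! {
	trait Store for Module<T: Trait> as Example {
		PalletVersion: u32;
	}
}

decl_module! {
	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
		// Invoked by the executive once after a runtime upgrade, before
		// `on_initialize` of the first block built with the new code.
		fn on_runtime_upgrade() {
			if PalletVersion::get() < 1 {
				// ... migrate storage to the new layout here ...
				PalletVersion::put(1);
			}
		}
	}
}
```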
+documentation = "https://docs.rs/pallet-finality-tracker" + [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-finality-tracker = { version = "2.0.0", default-features = false, path = "../../primitives/finality-tracker" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/finality-tracker" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/finality-tracker/src/lib.rs b/frame/finality-tracker/src/lib.rs index ef6e0d9a4bbbea11fa078842491086aebdd4d9a9..08056a34ab028f759ccae0705065c80d3283d689 100644 --- a/frame/finality-tracker/src/lib.rs +++ b/frame/finality-tracker/src/lib.rs @@ -263,7 +263,7 @@ mod tests { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } parameter_types! 
{ pub const WindowSize: u64 = 11; diff --git a/frame/generic-asset/Cargo.toml b/frame/generic-asset/Cargo.toml index cd236d7b7d0aa6186e956b34e12a844ae7cc5e6e..a8df92e3c61706c356117ff8c8403fe7b37ac8e0 100644 --- a/frame/generic-asset/Cargo.toml +++ b/frame/generic-asset/Cargo.toml @@ -1,29 +1,32 @@ [package] name = "pallet-generic-asset" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Centrality Developers "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for generic asset management" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } [features] default = ["std"] std =[ - "serde/std", - "codec/std", - "sp-std/std", - "sp-runtime/std", - "frame-support/std", - "frame-system/std", + "serde/std", + "codec/std", + "sp-std/std", + "sp-runtime/std", + "frame-support/std", + "frame-system/std", ] diff --git a/frame/generic-asset/src/lib.rs b/frame/generic-asset/src/lib.rs index e98fc32ecbe9a019f796be767dbe2af2e35d58c5..f1713dd586a93a87b69565fdd1cb9bd9c38c1e02 100644 --- a/frame/generic-asset/src/lib.rs +++ b/frame/generic-asset/src/lib.rs @@ -1124,7 +1124,7 @@ impl frame_system::Trait for ElevatedTrait { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl Trait for ElevatedTrait { type Balance = T::Balance; diff --git a/frame/generic-asset/src/mock.rs b/frame/generic-asset/src/mock.rs index 7a61a61c430ac484c3985d358ac6d10b6768b544..8db140d90c666359cf21092a3fc4c9f49217d1e9 100644 --- a/frame/generic-asset/src/mock.rs +++ b/frame/generic-asset/src/mock.rs @@ -64,7 +64,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl Trait for Test { diff --git a/frame/grandpa/Cargo.toml b/frame/grandpa/Cargo.toml index e67e64dd5f339532b5b7ff725a42964c005edb35..f5cce65fe906a55ca71bab5287b0f3ec69670d95 100644 --- a/frame/grandpa/Cargo.toml +++ b/frame/grandpa/Cargo.toml @@ -1,25 +1,28 @@ [package] name = "pallet-grandpa" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = 
"https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for GRANDPA finality gadget" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-finality-grandpa = { version = "2.0.0", default-features = false, path = "../../primitives/finality-grandpa" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -pallet-finality-tracker = { version = "2.0.0", default-features = false, path = "../finality-tracker" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-finality-grandpa = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/finality-grandpa" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } +pallet-finality-tracker = { version = "2.0.0-alpha.2", default-features = false, path = "../finality-tracker" } [dev-dependencies] -sp-io ={ version = "2.0.0", path = "../../primitives/io" } +sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 6d3223f094859ffd8b3ccc07314026b0803a6e27..3210627f915229bce907c0296749c5620bd241b2 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -481,7 +481,6 @@ struct GrandpaTimeSlot { // TODO [slashing]: Integrate this. /// A grandpa equivocation offence report. -#[allow(dead_code)] struct GrandpaEquivocationOffence { /// Time slot at which this incident happened. 
time_slot: GrandpaTimeSlot, diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 77a19c7626594414d48c7897259ca9c741bf79a3..8b94becd5aa6a123f846325b20e6e9632e8d67a6 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -67,7 +67,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } mod grandpa { diff --git a/frame/identity/Cargo.toml b/frame/identity/Cargo.toml index c95d23023089530d9a63cda2daef4c4290649831..3dbffedffcdc3ea332807f8542989e04121c92f4 100644 --- a/frame/identity/Cargo.toml +++ b/frame/identity/Cargo.toml @@ -1,24 +1,27 @@ [package] name = "pallet-identity" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME identity management pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } [features] default = ["std"] @@ -32,3 +35,4 @@ std = [ "frame-support/std", "frame-system/std", ] +runtime-benchmarks = ["frame-benchmarking"] diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index c208d3271774a9baa04b6625889761a681b37bac..b7a81956a55c5044d49a126c8f354fef951802f8 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -20,11 +20,8 @@ use super::*; use frame_system::RawOrigin; use sp_io::hashing::blake2_256; -use frame_benchmarking::{ - BenchmarkResults, BenchmarkParameter, selected_benchmark, benchmarking, Benchmarking, - BenchmarkingSetup, -}; -use sp_runtime::traits::{Bounded, Dispatchable}; +use frame_benchmarking::benchmarks; +use sp_runtime::traits::Bounded; use crate::Module as Identity; @@ -90,150 
+87,89 @@ fn create_identity_info(num_fields: u32) -> IdentityInfo { return info } -// Benchmark `add_registrar` extrinsic. -struct AddRegistrar; -impl BenchmarkingSetup, RawOrigin> for AddRegistrar { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // Add r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; - - // Return the `add_registrar` r + 1 call - Ok((crate::Call::::add_registrar(account::("registrar", r + 1)), RawOrigin::Root)) - } -} - -// Benchmark `set_identity` extrinsic. -struct SetIdentity; -impl BenchmarkingSetup, RawOrigin> for SetIdentity { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - // Additional Field Count - (BenchmarkParameter::X, 1, T::MaxAdditionalFields::get()) - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // Add r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; - - // The target user - let caller = account::("caller", r); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - // Add an initial identity - let initial_info = create_identity_info::(1); - Identity::::set_identity(caller_origin.clone(), initial_info)?; - - // User requests judgement from all the registrars, and they approve - for i in 0..r { - Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; - Identity::::provide_judgement( - RawOrigin::Signed(account::("registrar", i)).into(), - i, - caller_lookup.clone(), - Judgement::Reasonable - )?; - } - - // Create identity info with x additional fields - let x = components.iter().find(|&c| c.0 == BenchmarkParameter::X).unwrap().1; - // 32 byte data that we reuse below - let info = create_identity_info::(x); - - // Return the `set_identity` call - Ok((crate::Call::::set_identity(info), RawOrigin::Signed(caller))) +benchmarks! { + // These are the common parameters along with their instancing. + _ { + let r in 1 .. MAX_REGISTRARS => add_registrars::(r)?; + let s in 1 .. T::MaxSubAccounts::get() => { + // Give them s many sub accounts + let caller = account::("caller", 0); + let _ = add_sub_accounts::(caller, s)?; + }; + let x in 1 .. T::MaxAdditionalFields::get() => { + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller = account::("caller", 0); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + Identity::::set_identity(caller_origin, info)?; + }; } -} -// Benchmark `set_subs` extrinsic. -struct SetSubs; -impl BenchmarkingSetup, RawOrigin> for SetSubs { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Subs Count - (BenchmarkParameter::S, 1, T::MaxSubAccounts::get()), - ] - } + add_registrar { + let r in ...; + }: _(RawOrigin::Root, account::("registrar", r + 1)) + + set_identity { + let r in ...; + // This X doesn't affect the caller ID up front like with the others, so we don't use the + // standard preparation. + let x in _ .. 
_ => (); + let caller = { + // The target user + let caller = account::("caller", 0); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + // Add an initial identity + let initial_info = create_identity_info::(1); + Identity::::set_identity(caller_origin.clone(), initial_info)?; + + // User requests judgement from all the registrars, and they approve + for i in 0..r { + Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; + Identity::::provide_judgement( + RawOrigin::Signed(account::("registrar", i)).into(), + i, + caller_lookup.clone(), + Judgement::Reasonable + )?; + } + caller + }; + }: _( + RawOrigin::Signed(caller), + create_identity_info::(x) + ) - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // Generic data to be used. - let data = Data::Raw(vec![0; 32]); + set_subs { + let s in ...; - // The target user let caller = account::("caller", 0); let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); // Create their main identity let info = create_identity_info::(1); - Identity::::set_identity(caller_origin.clone(), info)?; - - // Give them s many sub accounts - let s = components.iter().find(|&c| c.0 == BenchmarkParameter::S).unwrap().1; - let mut subs = add_sub_accounts::(caller.clone(), s)?; - + Identity::::set_identity(caller_origin, info)?; + }: _(RawOrigin::Signed(caller), { + let mut subs = Module::::subs(&caller); + // Generic data to be used. + let data = Data::Raw(vec![0; 32]); // Create an s+1 sub account to add - subs.push((account::("sub", s+1), data)); - - // Return the `set_subs` call - Ok((crate::Call::::set_subs(subs), RawOrigin::Signed(caller))) - } -} - -// Benchmark `clear_identity` extrinsic. 
-struct ClearIdentity; -impl BenchmarkingSetup, RawOrigin> for ClearIdentity { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - // Subs Count - (BenchmarkParameter::S, 1, T::MaxSubAccounts::get()), - // Additional Field Count - (BenchmarkParameter::X, 1, T::MaxAdditionalFields::get()), - ] - } + subs.push((account::("sub", s + 1), data)); + subs + }) - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // The target user + clear_identity { let caller = account::("caller", 0); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_lookup = ::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - // Register r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; - - // Create their main identity with x additional fields - let x = components.iter().find(|&c| c.0 == BenchmarkParameter::X).unwrap().1; - let info = create_identity_info::(x); - Identity::::set_identity(caller_origin.clone(), info)?; - - // Give them s many sub accounts - let s = components.iter().find(|&c| c.0 == BenchmarkParameter::S).unwrap().1; - let _ = add_sub_accounts::(caller.clone(), s)?; + let r in ...; + let s in ...; + let x in ...; // User requests judgement from all the registrars, and they approve for i in 0..r { @@ -245,260 +181,87 @@ impl BenchmarkingSetup, RawOrigin> for Judgement::Reasonable )?; } + }: _(RawOrigin::Signed(caller)) - // Return the `clear_identity` call - Ok((crate::Call::::clear_identity(), RawOrigin::Signed(caller))) - } -} - -// Benchmark `request_judgement` extrinsic. -struct RequestJudgement; -impl BenchmarkingSetup, RawOrigin> for RequestJudgement { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - // Additional Field Count - (BenchmarkParameter::X, 1, T::MaxAdditionalFields::get()), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // The target user + request_judgement { let caller = account::("caller", 0); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - // Register r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; + let r in ...; + let x in ...; + }: _(RawOrigin::Signed(caller), r - 1, 10.into()) - // Create their main identity with x additional fields - let x = components.iter().find(|&c| c.0 == BenchmarkParameter::X).unwrap().1; - let info = create_identity_info::(x); - Identity::::set_identity(caller_origin.clone(), info)?; - - // Return the `request_judgement` call - Ok((crate::Call::::request_judgement(r-1, 10.into()), RawOrigin::Signed(caller))) - } -} - -// Benchmark `cancel_request` extrinsic. 
-struct CancelRequest; -impl BenchmarkingSetup, RawOrigin> for CancelRequest { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - // Additional Field Count - (BenchmarkParameter::X, 1, T::MaxAdditionalFields::get()), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // The target user + cancel_request { let caller = account::("caller", 0); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - // Register r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; + let r in ...; + let x in ...; - // Create their main identity with x additional fields - let x = components.iter().find(|&c| c.0 == BenchmarkParameter::X).unwrap().1; - let info = create_identity_info::(x); - Identity::::set_identity(caller_origin.clone(), info)?; - - // Request judgement - Identity::::request_judgement(caller_origin.clone(), r-1, 10.into())?; - - Ok((crate::Call::::cancel_request(r-1), RawOrigin::Signed(caller))) - } -} - -// Benchmark `set_fee` extrinsic. -struct SetFee; -impl BenchmarkingSetup, RawOrigin> for SetFee { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - ] - } + Identity::::request_judgement(caller_origin, r - 1, 10.into())?; + }: _(RawOrigin::Signed(caller), r - 1) - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // The target user + set_fee { let caller = account::("caller", 0); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - // Register r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; + let r in ...; - // Add caller as registrar Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + }: _(RawOrigin::Signed(caller), r, 10.into()) - // Return `set_fee` call - Ok((crate::Call::::set_fee(r, 10.into()), RawOrigin::Signed(caller))) - } -} - -// Benchmark `set_account_id` extrinsic. -struct SetAccountId; -impl BenchmarkingSetup, RawOrigin> for SetAccountId { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // The target user + set_account_id { let caller = account::("caller", 0); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - // Register r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; + let r in ...; - // Add caller as registrar Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + }: _(RawOrigin::Signed(caller), r, account::("new", 0)) - // Return `set_account_id` call - Ok((crate::Call::::set_account_id(r, account::("new", 0)), RawOrigin::Signed(caller))) - } -} - -// Benchmark `set_fields` extrinsic. 
-struct SetFields; -impl BenchmarkingSetup, RawOrigin> for SetFields { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // The target user + set_fields { let caller = account::("caller", 0); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - // Register r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; + let r in ...; - // Add caller as registrar Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; - let fields = IdentityFields( IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter ); + }: _(RawOrigin::Signed(caller), r, fields) - // Return `set_account_id` call - Ok((crate::Call::::set_fields(r, fields), RawOrigin::Signed(caller))) - } -} - -// Benchmark `provide_judgement` extrinsic.g -struct ProvideJudgement; -impl BenchmarkingSetup, RawOrigin> for ProvideJudgement { - - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - // Additional Field Count - (BenchmarkParameter::X, 1, T::MaxAdditionalFields::get()), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // Add r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; - + provide_judgement { // The user let user = account::("user", r); - let user_origin: ::Origin = RawOrigin::Signed(user.clone()).into(); - let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); + let user_lookup = ::unlookup(user.clone()); let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); - // Create their main identity with x additional fields - let x = components.iter().find(|&c| c.0 == BenchmarkParameter::X).unwrap().1; - let info = create_identity_info::(x); - Identity::::set_identity(user_origin.clone(), info)?; - - // The caller registrar - let caller = account::("caller", r); + let caller = account::("caller", 0); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - // Add caller as registrar - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let r in ...; + // For this x, it's the user identity that gts the fields, not the caller. + let x in _ .. _ => { + let info = create_identity_info::(x); + Identity::::set_identity(user_origin.clone(), info)?; + }; - // User requests judgement from caller registrar + Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; Identity::::request_judgement(user_origin.clone(), r, 10.into())?; + }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) - // Return `provide_judgement` call - Ok((crate::Call::::provide_judgement( - r, - user_lookup.clone(), - Judgement::Reasonable - ), RawOrigin::Signed(caller))) - } -} - -// Benchmark `kill_identity` extrinsic. 
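The identity benchmarking module is rewritten from hand-rolled `BenchmarkingSetup` implementations to the `frame_benchmarking::benchmarks!` macro: shared components and their setup live in the leading `_ { ... }` block, `let r in ...;` pulls a shared component into a benchmark, and the closing `}: _(origin, args..)` line is the call that actually gets timed. A minimal sketch of the same shape for a hypothetical pallet call follows; `do_something`, `setup_items` and `MAX_ITEMS` are invented for illustration.

```rust
// Sketch only; assumes this sits in a pallet's `benchmarking.rs` with `use super::*;`.
use frame_benchmarking::benchmarks;
use frame_system::RawOrigin;

const MAX_ITEMS: u32 = 100;

// Hypothetical setup helper, mirroring `add_registrars` above.
fn setup_items<T: Trait>(n: u32) -> Result<(), &'static str> {
	let _ = n;
	Ok(())
}

benchmarks! {
	// Shared components: a range plus the setup to run for it.
	_ {
		let n in 1 .. MAX_ITEMS => setup_items::<T>(n)?;
	}

	// Benchmarks the hypothetical `do_something(origin, n)` dispatchable;
	// the `_` after `}:` means "dispatch the extrinsic named by this block".
	do_something {
		let n in ...;
	}: _(RawOrigin::Root, n)
}
```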
-struct KillIdentity; -impl BenchmarkingSetup, RawOrigin> for KillIdentity { - - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Registrar Count - (BenchmarkParameter::R, 1, MAX_REGISTRARS), - // Subs Count - (BenchmarkParameter::S, 1, T::MaxSubAccounts::get()), - // Additional Field Count - (BenchmarkParameter::X, 1, T::MaxAdditionalFields::get()), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(crate::Call, RawOrigin), &'static str> - { - // The target user + kill_identity { let caller = account::("caller", 0); let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - // Register r registrars - let r = components.iter().find(|&c| c.0 == BenchmarkParameter::R).unwrap().1; - add_registrars::(r)?; - - // Create their main identity with x additional fields - let x = components.iter().find(|&c| c.0 == BenchmarkParameter::X).unwrap().1; - let info = create_identity_info::(x); - Identity::::set_identity(caller_origin.clone(), info)?; - - // Give them s many sub accounts - let s = components.iter().find(|&c| c.0 == BenchmarkParameter::S).unwrap().1; - let _ = add_sub_accounts::(caller.clone(), s)?; + let r in ...; + let s in ...; + let x in ...; // User requests judgement from all the registrars, and they approve for i in 0..r { @@ -510,86 +273,5 @@ impl BenchmarkingSetup, RawOrigin> for Judgement::Reasonable )?; } - - // Return the `kill_identity` call - Ok((crate::Call::::kill_identity(caller_lookup), RawOrigin::Root)) - } -} - -// The list of available benchmarks for this pallet. -selected_benchmark!( - AddRegistrar, - SetIdentity, - SetSubs, - ClearIdentity, - RequestJudgement, - CancelRequest, - SetFee, - SetAccountId, - SetFields, - ProvideJudgement, - KillIdentity -); - -impl Benchmarking for Module { - fn run_benchmark(extrinsic: Vec, steps: u32, repeat: u32) -> Result, &'static str> { - // Map the input to the selected benchmark. - let selected_benchmark = match extrinsic.as_slice() { - b"add_registrar" => SelectedBenchmark::AddRegistrar, - b"set_identity" => SelectedBenchmark::SetIdentity, - b"set_subs" => SelectedBenchmark::SetSubs, - b"clear_identity" => SelectedBenchmark::ClearIdentity, - b"request_judgement" => SelectedBenchmark::RequestJudgement, - b"cancel_request" => SelectedBenchmark::CancelRequest, - b"set_fee" => SelectedBenchmark::SetFee, - b"set_account_id" => SelectedBenchmark::SetAccountId, - b"set_fields" => SelectedBenchmark::SetFields, - b"provide_judgement" => SelectedBenchmark::ProvideJudgement, - b"kill_identity" => SelectedBenchmark::KillIdentity, - _ => return Err("Could not find extrinsic."), - }; - - // Warm up the DB - benchmarking::commit_db(); - benchmarking::wipe_db(); - - // first one is set_identity. - let components = , RawOrigin>>::components(&selected_benchmark); - // results go here - let mut results: Vec = Vec::new(); - // Select the component we will be benchmarking. Each component will be benchmarked. - for (name, low, high) in components.iter() { - // Create up to `STEPS` steps for that component between high and low. - let step_size = ((high - low) / steps).max(1); - let num_of_steps = (high - low) / step_size; - for s in 0..num_of_steps { - // This is the value we will be testing for component `name` - let component_value = low + step_size * s; - - // Select the mid value for all the other components. 
- let c: Vec<(BenchmarkParameter, u32)> = components.iter() - .map(|(n, l, h)| - (*n, if n == name { component_value } else { (h - l) / 2 + l }) - ).collect(); - - // Run the benchmark `repeat` times. - for _ in 0..repeat { - // Set up the externalities environment for the setup we want to benchmark. - let (call, caller) = , RawOrigin>>::instance(&selected_benchmark, &c)?; - // Commit the externalities to the database, flushing the DB cache. - // This will enable worst case scenario for reading from the database. - benchmarking::commit_db(); - // Run the benchmark. - let start = benchmarking::current_time(); - call.dispatch(caller.into())?; - let finish = benchmarking::current_time(); - let elapsed = finish - start; - results.push((c.clone(), elapsed)); - // Wipe the DB back to the genesis state. - benchmarking::wipe_db(); - } - } - } - return Ok(results); - } + }: _(RawOrigin::Root, caller_lookup) } diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index 895efa1c819cd5fce3c46019b6408a2adc59c2b8..cb2071d5d9ea872876d1a66e8c432341333de0b5 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -78,6 +78,7 @@ use frame_support::{ }; use frame_system::{self as system, ensure_signed, ensure_root}; +#[cfg(feature = "runtime-benchmarks")] pub mod benchmarking; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -394,7 +395,7 @@ decl_storage! { /// Alternative "sub" identities of this account. /// /// The first item is the deposit, the second is a vector of the accounts. - pub SubsOf get(fn subs): + pub SubsOf get(fn subs_of): map hasher(blake2_256) T::AccountId => (BalanceOf, Vec); /// The set of registrars. Not expected to get very big as can only be added through a @@ -875,6 +876,16 @@ decl_module! { } } +impl Module { + /// Get the subs of an account. + pub fn subs(who: &T::AccountId) -> Vec<(T::AccountId, Data)> { + SubsOf::::get(who).1 + .into_iter() + .filter_map(|a| SuperOf::::get(&a).map(|x| (a, x.1))) + .collect() + } +} + #[cfg(test)] mod tests { use super::*; @@ -926,7 +937,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; @@ -1097,14 +1108,14 @@ mod tests { assert_ok!(Identity::set_identity(Origin::signed(10), ten())); assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 80); - assert_eq!(Identity::subs(10), (10, vec![20])); + assert_eq!(Identity::subs_of(10), (10, vec![20])); assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); // push another item and re-set it. 
subs.push((30, Data::Raw(vec![50; 1]))); assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 70); - assert_eq!(Identity::subs(10), (20, vec![20, 30])); + assert_eq!(Identity::subs_of(10), (20, vec![20, 30])); assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); @@ -1112,7 +1123,7 @@ mod tests { subs[0] = (40, Data::Raw(vec![60; 1])); assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); assert_eq!(Balances::free_balance(10), 70); // no change in the balance - assert_eq!(Identity::subs(10), (20, vec![40, 30])); + assert_eq!(Identity::subs_of(10), (20, vec![40, 30])); assert_eq!(Identity::super_of(20), None); assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1])))); @@ -1120,7 +1131,7 @@ mod tests { // clear assert_ok!(Identity::set_subs(Origin::signed(10), vec![])); assert_eq!(Balances::free_balance(10), 90); - assert_eq!(Identity::subs(10), (0, vec![])); + assert_eq!(Identity::subs_of(10), (0, vec![])); assert_eq!(Identity::super_of(30), None); assert_eq!(Identity::super_of(40), None); diff --git a/frame/im-online/Cargo.toml b/frame/im-online/Cargo.toml index 46bff2dc3588f3a56f9c6d14ca56ee5c9f8ec5a6..ab8c066945480dee7feaf11fb14873e71695bfa0 100644 --- a/frame/im-online/Cargo.toml +++ b/frame/im-online/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-im-online" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME's I'm online pallet" [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } +pallet-authorship = { version = "2.0.0-alpha.2", default-features = false, path = "../authorship" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -pallet-session = { version = "2.0.0", default-features = false, path = "../session" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-alpha.2", default-features = false, path = "../session" } +sp-io = { version = 
"2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [features] default = ["std", "pallet-session/historical"] diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index a050ad3d8a98d5360344a8314bcc979f1327b987..9aa8d2d67f6feb6d1ef5c0d18b2f30280d60b678 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -341,8 +341,6 @@ decl_module! { // Runs after every block. fn offchain_worker(now: T::BlockNumber) { - debug::RuntimeLogger::init(); - // Only send messages if we are a potential validator. if sp_io::offchain::is_validator() { for res in Self::send_heartbeats(now).into_iter().flatten() { @@ -610,7 +608,9 @@ impl pallet_session::OneSessionHandler for Module { let validator_set_count = keys.len() as u32; let offence = UnresponsivenessOffence { session_index, validator_set_count, offenders }; - T::ReportUnresponsiveness::report_offence(vec![], offence); + if let Err(e) = T::ReportUnresponsiveness::report_offence(vec![], offence) { + sp_runtime::print(e); + } } } @@ -619,7 +619,6 @@ impl pallet_session::OneSessionHandler for Module { } } -#[allow(deprecated)] impl frame_support::unsigned::ValidateUnsigned for Module { type Call = Call; diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index 7ee4c89ab46d3956154848add0b07b5ab5ae92c5..78b6409d543eb366ca77e5b94a1cea2ac92973cb 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -22,7 +22,7 @@ use std::cell::RefCell; use crate::{Module, Trait}; use sp_runtime::Perbill; -use sp_staking::{SessionIndex, offence::ReportOffence}; +use sp_staking::{SessionIndex, offence::{ReportOffence, OffenceError}}; use sp_runtime::testing::{Header, UintAuthorityId, TestXt}; use sp_runtime::traits::{IdentityLookup, BlakeTwo256, ConvertInto}; use sp_core::H256; @@ -49,6 +49,7 @@ impl pallet_session::SessionManager for TestSessionManager { VALIDATORS.with(|l| l.borrow_mut().take()) } fn end_session(_: SessionIndex) {} + fn start_session(_: SessionIndex) {} } impl pallet_session::historical::SessionManager for TestSessionManager { @@ -62,6 +63,7 @@ impl pallet_session::historical::SessionManager for TestSessionManager ) } fn end_session(_: SessionIndex) {} + fn start_session(_: SessionIndex) {} } /// An extrinsic type used for tests. @@ -77,8 +79,9 @@ thread_local! { /// A mock offence report handler. pub struct OffenceHandler; impl ReportOffence for OffenceHandler { - fn report_offence(reporters: Vec, offence: Offence) { + fn report_offence(reporters: Vec, offence: Offence) -> Result<(), OffenceError> { OFFENCES.with(|l| l.borrow_mut().push((reporters, offence))); + Ok(()) } } @@ -117,7 +120,7 @@ impl frame_system::Trait for Runtime { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } parameter_types! 
{ diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index 80056023e505c48fdc066cfdcc47559d84bab257..b43adca0fd485d36b5c8d2f8d81ce365d8518711 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -112,7 +112,6 @@ fn heartbeat( authority_index: u32, id: UintAuthorityId, ) -> dispatch::DispatchResult { - #[allow(deprecated)] use frame_support::unsigned::ValidateUnsigned; let heartbeat = Heartbeat { @@ -126,7 +125,6 @@ fn heartbeat( }; let signature = id.sign(&heartbeat.encode()).unwrap(); - #[allow(deprecated)] // Allow ValidateUnsigned ImOnline::pre_dispatch(&crate::Call::heartbeat(heartbeat.clone(), signature.clone())) .map_err(|e| <&'static str>::from(e))?; ImOnline::heartbeat( @@ -192,6 +190,8 @@ fn late_heartbeat_should_fail() { #[test] fn should_generate_heartbeats() { + use sp_runtime::traits::OffchainWorker; + let mut ext = new_test_ext(); let (offchain, _state) = TestOffchainExt::new(); let (pool, state) = TestTransactionPoolExt::new(); @@ -202,6 +202,7 @@ fn should_generate_heartbeats() { // given let block = 1; System::set_block_number(block); + UintAuthorityId::set_all_keys(vec![0, 1, 2]); // buffer new validators Session::rotate_session(); // enact the change and buffer another one @@ -209,17 +210,13 @@ fn should_generate_heartbeats() { Session::rotate_session(); // when - UintAuthorityId::set_all_keys(vec![0, 1, 2]); - ImOnline::send_heartbeats(2) - .unwrap() - // make sure to consume the iterator and check there are no errors. - .collect::, _>>().unwrap(); - + ImOnline::offchain_worker(block); // then let transaction = state.write().transactions.pop().unwrap(); - // All validators have `0` as their session key, so we generate 3 transactions. + // All validators have `0` as their session key, so we generate 2 transactions. assert_eq!(state.read().transactions.len(), 2); + // check stuff about the transaction. 
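The im-online mock grows empty `start_session` implementations because `pallet_session::SessionManager` (and its `historical` counterpart) now expect that hook alongside `new_session` and `end_session`. A minimal implementer under that assumption (`NoopSessionManager` is a hypothetical name):

```rust
// Sketch of the three-hook shape the updated mocks above implement.
use sp_staking::SessionIndex;

pub struct NoopSessionManager;
impl pallet_session::SessionManager<u64> for NoopSessionManager {
	// Returning `None` keeps the current validator set.
	fn new_session(_new_index: SessionIndex) -> Option<Vec<u64>> { None }
	fn start_session(_start_index: SessionIndex) {}
	fn end_session(_end_index: SessionIndex) {}
}
```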
let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); let heartbeat = match ex.call { @@ -228,7 +225,7 @@ fn should_generate_heartbeats() { }; assert_eq!(heartbeat, Heartbeat { - block_number: 2, + block_number: block, network_state: sp_io::offchain::network_state().unwrap(), session_index: 2, authority_index: 2, diff --git a/frame/indices/Cargo.toml b/frame/indices/Cargo.toml index eb5298dcfa569fa691121002fd5d736b34816f89..d7e01765b59d165a2f1f8d52e44e1e3b61ee18e4 100644 --- a/frame/indices/Cargo.toml +++ b/frame/indices/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-indices" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME indices management pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-keyring = { version = "2.0.0-alpha.2", optional = true, path = "../../primitives/keyring" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -pallet-balances = { version = "2.0.0", path = "../balances" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } [features] default = ["std"] diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index d59a50179372d0064da3455ba34020c98de16b55..95ac6cf75283816bb3058f148d849f365de1345d 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -264,7 +264,7 @@ impl StaticLookup for Module { type Source = address::Address; type Target = T::AccountId; - fn lookup(a: Self::Source) -> Result { + fn lookup(a: Self::Source) -> Result { Self::lookup_address(a).ok_or(LookupError) } diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index fe01b680bcc0187b9b53e550e8f056c47ae01fbb..355b3cc792c9456a0537c263d0f79a5ec58e7042 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -67,7 +67,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! 
{ diff --git a/frame/membership/Cargo.toml b/frame/membership/Cargo.toml index 719718505e2e42beb2cbdd1a74d26de1ea0ed3b5..b54109083dc0d524c127668573aea89c02b14c87 100644 --- a/frame/membership/Cargo.toml +++ b/frame/membership/Cargo.toml @@ -1,21 +1,24 @@ [package] name = "pallet-membership" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME membership management pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index 1ad187b2ee35e23c14f9989623f1399da3a3197d..129f3c4003bdd306a8feedcd4f2d39ffb6ddfa72 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -17,7 +17,7 @@ //! # Membership Module //! //! Allows control of membership of a set of `AccountId`s, useful for managing membership of of a -//! collective. +//! collective. A prime member may be set. // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] @@ -47,6 +47,9 @@ pub trait Trait: frame_system::Trait { /// Required origin for resetting membership. type ResetOrigin: EnsureOrigin; + /// Required origin for setting or resetting the prime member. + type PrimeOrigin: EnsureOrigin; + /// The receiver of the signal for when the membership has been initialized. This happens pre- /// genesis and will usually be the same as `MembershipChanged`. If you need to do something /// different on initialization, then you can change this accordingly. @@ -60,6 +63,9 @@ decl_storage! { trait Store for Module, I: Instance=DefaultInstance> as Membership { /// The current membership, stored as an ordered Vec. Members get(fn members): Vec; + + /// The current prime member, if one exists. + Prime get(fn prime): Option; } add_extra_genesis { config(members): Vec; @@ -144,6 +150,7 @@ decl_module! { >::put(&members); T::MembershipChanged::change_members_sorted(&[], &[who], &members[..]); + Self::rejig_prime(&members); Self::deposit_event(RawEvent::MemberRemoved); } @@ -151,6 +158,8 @@ decl_module! { /// Swap out one member `remove` for another `add`. 
/// /// May only be called from `SwapOrigin` or root. + /// + /// Prime membership is *not* passed from `remove` to `add`, if extant. #[weight = SimpleDispatchInfo::FixedNormal(50_000)] fn swap_member(origin, remove: T::AccountId, add: T::AccountId) { T::SwapOrigin::try_origin(origin) @@ -171,6 +180,7 @@ decl_module! { &[remove], &members[..], ); + Self::rejig_prime(&members); Self::deposit_event(RawEvent::MembersSwapped); } @@ -189,15 +199,19 @@ decl_module! { members.sort(); >::mutate(|m| { T::MembershipChanged::set_members_sorted(&members[..], m); + Self::rejig_prime(&members); *m = members; }); + Self::deposit_event(RawEvent::MembersReset); } /// Swap out the sending member for some other key `new`. /// /// May only be called from `Signed` origin of a current member. + /// + /// Prime membership is passed from the origin account to `new`, if extant. #[weight = SimpleDispatchInfo::FixedNormal(50_000)] fn change_key(origin, new: T::AccountId) { let remove = ensure_signed(origin)?; @@ -211,14 +225,51 @@ decl_module! { >::put(&members); T::MembershipChanged::change_members_sorted( - &[new], - &[remove], + &[new.clone()], + &[remove.clone()], &members[..], ); + + if Prime::::get() == Some(remove) { + Prime::::put(&new); + T::MembershipChanged::set_prime(Some(new)); + } } Self::deposit_event(RawEvent::KeyChanged); } + + /// Set the prime member. Must be a current member. + #[weight = SimpleDispatchInfo::FixedNormal(50_000)] + fn set_prime(origin, who: T::AccountId) { + T::PrimeOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + Self::members().binary_search(&who).ok().ok_or(Error::::NotMember)?; + Prime::::put(&who); + T::MembershipChanged::set_prime(Some(who)); + } + + /// Remove the prime member if it exists. + #[weight = SimpleDispatchInfo::FixedNormal(50_000)] + fn clear_prime(origin) { + T::PrimeOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + Prime::::kill(); + T::MembershipChanged::set_prime(None); + } + } +} + +impl, I: Instance> Module { + fn rejig_prime(members: &[T::AccountId]) { + if let Some(prime) = Prime::::get() { + match members.binary_search(&prime) { + Ok(_) => T::MembershipChanged::set_prime(Some(prime)), + Err(_) => Prime::::kill(), + } + } } } @@ -271,7 +322,7 @@ mod tests { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } ord_parameter_types! { pub const One: u64 = 1; @@ -283,6 +334,7 @@ mod tests { thread_local! 
{ static MEMBERS: RefCell> = RefCell::new(vec![]); + static PRIME: RefCell> = RefCell::new(None); } pub struct TestChangeMembers; @@ -297,6 +349,10 @@ mod tests { assert_eq!(old_plus_incoming, new_plus_outgoing); MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); + PRIME.with(|p| *p.borrow_mut() = None); + } + fn set_prime(who: Option) { + PRIME.with(|p| *p.borrow_mut() = who); } } impl InitializeMembers for TestChangeMembers { @@ -311,6 +367,7 @@ mod tests { type RemoveOrigin = EnsureSignedBy; type SwapOrigin = EnsureSignedBy; type ResetOrigin = EnsureSignedBy; + type PrimeOrigin = EnsureSignedBy; type MembershipInitialized = TestChangeMembers; type MembershipChanged = TestChangeMembers; } @@ -337,6 +394,21 @@ mod tests { }); } + #[test] + fn prime_member_works() { + new_test_ext().execute_with(|| { + assert_noop!(Membership::set_prime(Origin::signed(4), 20), BadOrigin); + assert_noop!(Membership::set_prime(Origin::signed(5), 15), Error::::NotMember); + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); + assert_eq!(Membership::prime(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + + assert_ok!(Membership::clear_prime(Origin::signed(5))); + assert_eq!(Membership::prime(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + }); + } + #[test] fn add_member_works() { new_test_ext().execute_with(|| { @@ -353,9 +425,12 @@ mod tests { new_test_ext().execute_with(|| { assert_noop!(Membership::remove_member(Origin::signed(5), 20), BadOrigin); assert_noop!(Membership::remove_member(Origin::signed(2), 15), Error::::NotMember); + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); assert_ok!(Membership::remove_member(Origin::signed(2), 20)); assert_eq!(Membership::members(), vec![10, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); }); } @@ -365,11 +440,19 @@ mod tests { assert_noop!(Membership::swap_member(Origin::signed(5), 10, 25), BadOrigin); assert_noop!(Membership::swap_member(Origin::signed(3), 15, 25), Error::::NotMember); assert_noop!(Membership::swap_member(Origin::signed(3), 10, 30), Error::::AlreadyMember); + + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); assert_ok!(Membership::swap_member(Origin::signed(3), 20, 20)); assert_eq!(Membership::members(), vec![10, 20, 30]); + assert_eq!(Membership::prime(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + + assert_ok!(Membership::set_prime(Origin::signed(5), 10)); assert_ok!(Membership::swap_member(Origin::signed(3), 10, 25)); assert_eq!(Membership::members(), vec![20, 25, 30]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); }); } @@ -385,11 +468,14 @@ mod tests { #[test] fn change_key_works() { new_test_ext().execute_with(|| { + assert_ok!(Membership::set_prime(Origin::signed(5), 10)); assert_noop!(Membership::change_key(Origin::signed(3), 25), Error::::NotMember); assert_noop!(Membership::change_key(Origin::signed(10), 20), Error::::AlreadyMember); assert_ok!(Membership::change_key(Origin::signed(10), 40)); assert_eq!(Membership::members(), vec![20, 30, 40]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), Some(40)); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); }); } @@ -405,10 +491,20 @@ mod tests 
{ #[test] fn reset_members_works() { new_test_ext().execute_with(|| { + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); assert_noop!(Membership::reset_members(Origin::signed(1), vec![20, 40, 30]), BadOrigin); + assert_ok!(Membership::reset_members(Origin::signed(4), vec![20, 40, 30])); assert_eq!(Membership::members(), vec![20, 30, 40]); assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + + assert_ok!(Membership::reset_members(Origin::signed(4), vec![10, 40, 30])); + assert_eq!(Membership::members(), vec![10, 30, 40]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); }); } } diff --git a/frame/metadata/Cargo.toml b/frame/metadata/Cargo.toml index e245f04849a40b9e1a9c1c1250fdf659c3fe8db7..b3d333f369fbd901c5fa2d597fe3bedb063462ea 100644 --- a/frame/metadata/Cargo.toml +++ b/frame/metadata/Cargo.toml @@ -1,15 +1,18 @@ [package] name = "frame-metadata" -version = "11.0.0" +version = "11.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Decodable variant of the RuntimeMetadata." [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/nicks/Cargo.toml b/frame/nicks/Cargo.toml index 7c05080d49110a6bf6cc0636d46ef046fe175c44..07c84c439ffaa57dedb4e8a2da8647e9d02460df 100644 --- a/frame/nicks/Cargo.toml +++ b/frame/nicks/Cargo.toml @@ -1,22 +1,25 @@ [package] name = "pallet-nicks" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for nick management" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = 
"../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } [features] default = ["std"] diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index 2b2d59014d8b24d78784baca4b672711f77777a1..caed6e40accb0c4909f64ad2a4e3317a27e8aa95 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -287,7 +287,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; diff --git a/frame/offences/Cargo.toml b/frame/offences/Cargo.toml index f5486f35ffd95f204929842906940c0b1119e186..99c52e40885378f8eccee76a75887ef216fdf38f 100644 --- a/frame/offences/Cargo.toml +++ b/frame/offences/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-offences" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME offences pallet" [dependencies] -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../balances" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } serde = { version = "1.0.101", optional = true } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 7831ba65a3b067b4b68746d4272073e3a79b77c8..27983cbb5332e57e95724999338e850dfd77f7c9 100644 --- 
a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -30,7 +30,7 @@ use frame_support::{ }; use sp_runtime::traits::Hash; use sp_staking::{ - offence::{Offence, ReportOffence, Kind, OnOffenceHandler, OffenceDetails}, + offence::{Offence, ReportOffence, Kind, OnOffenceHandler, OffenceDetails, OffenceError}, }; use codec::{Encode, Decode}; use frame_system as system; @@ -90,7 +90,7 @@ impl> where T::IdentificationTuple: Clone, { - fn report_offence(reporters: Vec, offence: O) { + fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { let offenders = offence.offenders(); let time_slot = offence.time_slot(); let validator_set_count = offence.validator_set_count(); @@ -104,7 +104,7 @@ where ) { Some(triage) => triage, // The report contained only duplicates, so there is no need to slash again. - None => return, + None => return Err(OffenceError::DuplicateReport), }; // Deposit the event. @@ -123,6 +123,8 @@ where &slash_perbill, offence.session_index(), ); + + Ok(()) } } diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index f2e19b63f5a47d21670336619ed9a12f812c9d1b..a003ad69157fcf5e8bf2f9c296aea3dd010e1b7d 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -91,7 +91,7 @@ impl frame_system::Trait for Runtime { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl Trait for Runtime { diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index f2f82cf7a87eee70677d28dc81cb88808ef19451..0ed98427c65f8eba4d02be1e679dc1e8db52d791 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -40,7 +40,7 @@ fn should_report_an_authority_and_trigger_on_offence() { }; // when - Offences::report_offence(vec![], offence); + Offences::report_offence(vec![], offence).unwrap(); // then with_on_offence_fractions(|f| { @@ -61,7 +61,7 @@ fn should_not_report_the_same_authority_twice_in_the_same_slot() { time_slot, offenders: vec![5], }; - Offences::report_offence(vec![], offence.clone()); + Offences::report_offence(vec![], offence.clone()).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); f.clear(); @@ -69,7 +69,7 @@ fn should_not_report_the_same_authority_twice_in_the_same_slot() { // when // report for the second time - Offences::report_offence(vec![], offence); + assert_eq!(Offences::report_offence(vec![], offence), Err(OffenceError::DuplicateReport)); // then with_on_offence_fractions(|f| { @@ -91,7 +91,7 @@ fn should_report_in_different_time_slot() { time_slot, offenders: vec![5], }; - Offences::report_offence(vec![], offence.clone()); + Offences::report_offence(vec![], offence.clone()).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); f.clear(); @@ -100,7 +100,7 @@ fn should_report_in_different_time_slot() { // when // report for the second time offence.time_slot += 1; - Offences::report_offence(vec![], offence); + Offences::report_offence(vec![], offence).unwrap(); // then with_on_offence_fractions(|f| { @@ -123,7 +123,7 @@ fn should_deposit_event() { }; // when - Offences::report_offence(vec![], offence); + Offences::report_offence(vec![], offence).unwrap(); // then assert_eq!( @@ -149,7 +149,7 @@ fn doesnt_deposit_event_for_dups() { time_slot, offenders: vec![5], }; - Offences::report_offence(vec![], offence.clone()); + Offences::report_offence(vec![], offence.clone()).unwrap(); with_on_offence_fractions(|f| { 
assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); f.clear(); @@ -157,7 +157,7 @@ fn doesnt_deposit_event_for_dups() { // when // report for the second time - Offences::report_offence(vec![], offence); + assert_eq!(Offences::report_offence(vec![], offence), Err(OffenceError::DuplicateReport)); // then // there is only one event. @@ -191,7 +191,7 @@ fn should_properly_count_offences() { time_slot, offenders: vec![4], }; - Offences::report_offence(vec![], offence1); + Offences::report_offence(vec![], offence1).unwrap(); with_on_offence_fractions(|f| { assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); f.clear(); @@ -199,7 +199,7 @@ fn should_properly_count_offences() { // when // report for the second time - Offences::report_offence(vec![], offence2); + Offences::report_offence(vec![], offence2).unwrap(); // then // the 1st authority should have count 2 and the 2nd one should be reported only once. diff --git a/frame/randomness-collective-flip/Cargo.toml b/frame/randomness-collective-flip/Cargo.toml index 56ff12d2dfe810584c67e3fd0ae56b2c59365d90..4e4ce76fee7edb7d39402be261c078925f63bb79 100644 --- a/frame/randomness-collective-flip/Cargo.toml +++ b/frame/randomness-collective-flip/Cargo.toml @@ -1,21 +1,24 @@ [package] name = "pallet-randomness-collective-flip" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME randomness collective flip pallet" [dependencies] safe-mix = { version = "1.0", default-features = false } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-io = { version = "2.0.0", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } [features] default = ["std"] diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 53d640688ef7dfaa0e4cfb171de84ce656546316..0ded7dd6b0c64e5c932cf42d2f7478dea1ef4723 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -193,7 +193,7 @@ mod tests { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } type System = frame_system::Module; diff --git a/frame/recovery/Cargo.toml b/frame/recovery/Cargo.toml index 
645b1f2e98a34b74d909ec1947515d9289d2c980..80456aa375dd2e569300aac339642f5124930397 100644 --- a/frame/recovery/Cargo.toml +++ b/frame/recovery/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-recovery" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME account recovery pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } [features] default = ["std"] diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 6fa00af751e67cb33b0b05f8dd3b189083989462..1b3c9416388f94315a811f5330bf797f4f927a8e 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -159,9 +159,8 @@ use codec::{Encode, Decode}; use frame_support::{ decl_module, decl_event, decl_storage, decl_error, ensure, - Parameter, RuntimeDebug, - weights::{GetDispatchInfo, SimpleDispatchInfo, FunctionOf}, - traits::{Currency, ReservableCurrency, Get, OnReapAccount, BalanceStatus}, + Parameter, RuntimeDebug, weights::{GetDispatchInfo, SimpleDispatchInfo, FunctionOf}, + traits::{Currency, ReservableCurrency, Get, BalanceStatus}, }; use frame_system::{self as system, ensure_signed, ensure_root}; @@ -241,6 +240,7 @@ decl_storage! { pub Recoverable get(fn recovery_config): map hasher(blake2_256) T::AccountId => Option, T::AccountId>>; + /// Active recovery attempts. /// /// First account is the account to be recovered, and the second account @@ -248,10 +248,11 @@ decl_storage! { pub ActiveRecoveries get(fn active_recovery): double_map hasher(twox_64_concat) T::AccountId, hasher(twox_64_concat) T::AccountId => Option, T::AccountId>>; - /// The final list of recovered accounts. + + /// The list of allowed proxy accounts. /// - /// Map from the recovered account to the user who can access it. - pub Recovered get(fn recovered_account): + /// Map from the user who can access it to the recovered account. 
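The `Recovered` storage item is being replaced here by a `Proxy` map keyed the other way around: the rescuer who may call `as_recovered` is the key, and the value is the lost account that rescuer can act for. A minimal standalone sketch of that reversed lookup, using a plain HashMap in place of the storage map (illustrative only, not the pallet code):

use std::collections::HashMap;

type AccountId = u64;

// Mirrors the check in `as_recovered`: fetch the target for `who` and compare it
// against the account the caller claims to act for.
fn may_act_for(proxy: &HashMap<AccountId, AccountId>, who: AccountId, account: AccountId) -> bool {
    proxy.get(&who) == Some(&account)
}

fn main() {
    let mut proxy = HashMap::new();
    proxy.insert(1u64, 5u64);            // rescuer 1 has recovered lost account 5
    assert!(may_act_for(&proxy, 1, 5));  // rescuer may act on behalf of the lost account
    assert!(!may_act_for(&proxy, 5, 1)); // the lost account is no longer a key
}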
+ pub Proxy get(fn proxy): map hasher(blake2_256) T::AccountId => Option; } } @@ -308,6 +309,8 @@ decl_error! { StillActive, /// There was an overflow in a calculation Overflow, + /// This account is already set up for recovery + AlreadyProxy, } } @@ -332,7 +335,7 @@ decl_module! { /// - One storage lookup to check account is recovered by `who`. O(1) /// # #[weight = FunctionOf( - |args: (&T::AccountId, &Box<::Call>)| args.1.get_dispatch_info().weight + 10_000, + |args: (&T::AccountId, &Box<::Call>)| args.1.get_dispatch_info().weight + 10_000, |args: (&T::AccountId, &Box<::Call>)| args.1.get_dispatch_info().class, true )] @@ -342,7 +345,8 @@ decl_module! { ) -> DispatchResult { let who = ensure_signed(origin)?; // Check `who` is allowed to make a call on behalf of `account` - ensure!(Self::recovered_account(&account) == Some(who), Error::::NotAllowed); + let target = Self::proxy(&who).ok_or(Error::::NotAllowed)?; + ensure!(&target == &account, Error::::NotAllowed); call.dispatch(frame_system::RawOrigin::Signed(account).into()) } @@ -363,7 +367,7 @@ decl_module! { fn set_recovered(origin, lost: T::AccountId, rescuer: T::AccountId) { ensure_root(origin)?; // Create the recovery storage item. - >::insert(&lost, &rescuer); + >::insert(&rescuer, &lost); Self::deposit_event(RawEvent::AccountRecovered(lost, rescuer)); } @@ -428,6 +432,7 @@ decl_module! { }; // Create the recovery configuration storage item >::insert(&who, recovery_config); + Self::deposit_event(RawEvent::RecoveryCreated(who)); } @@ -545,6 +550,7 @@ decl_module! { let recovery_config = Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; // Get the active recovery process for the rescuer let active_recovery = Self::active_recovery(&account, &who).ok_or(Error::::NotStarted)?; + ensure!(!Proxy::::contains_key(&who), Error::::AlreadyProxy); // Make sure the delay period has passed let current_block_number = >::block_number(); let recoverable_block_number = active_recovery.created @@ -557,7 +563,8 @@ decl_module! { Error::::Threshold ); // Create the recovery storage item - >::insert(&account, &who); + Proxy::::insert(&who, &account); + system::Module::::inc_ref(&who); Self::deposit_event(RawEvent::AccountRecovered(account, who)); } @@ -592,7 +599,7 @@ decl_module! { Self::deposit_event(RawEvent::RecoveryClosed(who, rescuer)); } - /// Remove the recovery process for your account. + /// Remove the recovery process for your account. Recovered accounts are still accessible. /// /// NOTE: The user must make sure to call `close_recovery` on all active /// recovery attempts before calling this function else it will fail. @@ -621,10 +628,30 @@ decl_module! { ensure!(active_recoveries.next().is_none(), Error::::StillActive); // Take the recovery configuration for this account. let recovery_config = >::take(&who).ok_or(Error::::NotRecoverable)?; + // Unreserve the initial deposit for the recovery configuration. T::Currency::unreserve(&who, recovery_config.deposit); Self::deposit_event(RawEvent::RecoveryRemoved(who)); } + + /// Cancel the ability to use `as_recovered` for `account`. + /// + /// The dispatch origin for this call must be _Signed_ and registered to + /// be able to make calls on behalf of the recovered account. + /// + /// Parameters: + /// - `account`: The recovered account you are able to call on-behalf-of. + /// + /// # + /// - One storage mutation to check account is recovered by `who`. 
O(1) + /// # + fn cancel_recovered(origin, account: T::AccountId) { + let who = ensure_signed(origin)?; + // Check `who` is allowed to make a call on behalf of `account` + ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); + Proxy::::remove(&who); + system::Module::::dec_ref(&who); + } } } @@ -639,11 +666,3 @@ impl Module { friends.binary_search(&friend).is_ok() } } - -impl OnReapAccount for Module { - /// Remove any existing access another account might have when the account is reaped. - /// This removes the final storage item managed by this module for any given account. - fn on_reap_account(who: &T::AccountId) { - >::remove(who); - } -} diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index 97ee07bc6a2e9666714ef9f309b9171b808a4669..a5b7731c2286a3c4af60ee3ec54f64060c0953a5 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -80,7 +80,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = (Balances, Recovery); + type OnKilledAccount = (); } parameter_types! { diff --git a/frame/recovery/src/tests.rs b/frame/recovery/src/tests.rs index 97d4791cce508052d212117a4120290d53599eaa..9c644291c906dcbf00ab0185d3e1065fc8d54fb1 100644 --- a/frame/recovery/src/tests.rs +++ b/frame/recovery/src/tests.rs @@ -31,7 +31,7 @@ use frame_support::{ fn basic_setup_works() { new_test_ext().execute_with(|| { // Nothing in storage to start - assert_eq!(Recovery::recovered_account(&1), None); + assert_eq!(Recovery::proxy(&2), None); assert_eq!(Recovery::active_recovery(&1, &2), None); assert_eq!(Recovery::recovery_config(&1), None); // Everyone should have starting balance of 100 @@ -91,10 +91,13 @@ fn recovery_life_cycle_works() { // All funds have been fully recovered! assert_eq!(Balances::free_balance(1), 200); assert_eq!(Balances::free_balance(5), 0); + // Remove the proxy link. + assert_ok!(Recovery::cancel_recovered(Origin::signed(1), 5)); + // All storage items are removed from the module assert!(!>::contains_key(&5, &1)); assert!(!>::contains_key(&5)); - assert!(!>::contains_key(&5)); + assert!(!>::contains_key(&1)); }); } @@ -335,7 +338,7 @@ fn claim_recovery_works() { // Account can be recovered. assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); // Recovered storage item is correctly created - assert_eq!(>::get(&5), Some(1)); + assert_eq!(>::get(&1), Some(5)); // Account could be re-recovered in the case that the recoverer account also gets lost. assert_ok!(Recovery::initiate_recovery(Origin::signed(4), 5)); assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 4)); @@ -347,7 +350,7 @@ fn claim_recovery_works() { // Account is re-recovered. 
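The new `cancel_recovered` call pairs with `claim_recovery`: claiming inserts the proxy link and bumps the rescuer's reference count in frame_system, cancelling removes the link and releases that reference so the account can be reaped again. A toy counter illustrating that pairing (assumed, simplified semantics of `inc_ref`/`dec_ref`/`allow_death`, not the actual system pallet):

#[derive(Default)]
struct SystemAccount { refs: u32 } // stands in for the account's reference counter

impl SystemAccount {
    fn inc_ref(&mut self) { self.refs += 1; }
    fn dec_ref(&mut self) { self.refs = self.refs.saturating_sub(1); }
    /// The account may only be reaped once nothing references it any more.
    fn allow_death(&self) -> bool { self.refs == 0 }
}

fn main() {
    let mut rescuer = SystemAccount::default();
    rescuer.inc_ref();                // claim_recovery: proxy link created
    assert!(!rescuer.allow_death());  // cannot be reaped while the link exists
    rescuer.dec_ref();                // cancel_recovered: proxy link removed
    assert!(rescuer.allow_death());
}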
assert_ok!(Recovery::claim_recovery(Origin::signed(4), 5)); // Recovered storage item is correctly updated - assert_eq!(>::get(&5), Some(4)); + assert_eq!(>::get(&4), Some(5)); }); } diff --git a/frame/scored-pool/Cargo.toml b/frame/scored-pool/Cargo.toml index c0935cd88be925dbe59fc0ba7ecf253ca354d124..a1b7b39d8cd9ffc2b548522168b0599ca5958678 100644 --- a/frame/scored-pool/Cargo.toml +++ b/frame/scored-pool/Cargo.toml @@ -1,22 +1,25 @@ [package] name = "pallet-scored-pool" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for scored pools" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 90006da91e99a2372b8998e902ab9bbf90a368e1..a28b7891370eafbdc47260ef602ae2a69660c119 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -72,7 +72,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } impl pallet_balances::Trait for Test { diff --git a/frame/session/Cargo.toml b/frame/session/Cargo.toml index 06981240b1576cdc85a2de3aa09fb62ec2bff992..74ca9fe67bb41968927f5050a7db90466c2bc6e4 100644 --- a/frame/session/Cargo.toml +++ b/frame/session/Cargo.toml @@ -1,26 +1,29 @@ [package] name = "pallet-session" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME sessions pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = 
"../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../timestamp" } -sp-trie = { optional = true, path = "../../primitives/trie", default-features = false } -sp-io ={ path = "../../primitives/io", default-features = false } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../timestamp" } +sp-trie = { optional = true, path = "../../primitives/trie", default-features = false , version = "2.0.0-alpha.2"} +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-application-crypto = { version = "2.0.0", path = "../../primitives/application-crypto" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../../primitives/application-crypto" } lazy_static = "1.4.0" [features] diff --git a/frame/session/src/historical.rs b/frame/session/src/historical.rs index cf20f31360cb68737e8457e28eeef1b6b33b8770..91719c27f27ccc59ab7ad1956a0ae1afcb7714c8 100644 --- a/frame/session/src/historical.rs +++ b/frame/session/src/historical.rs @@ -27,8 +27,8 @@ use sp_std::prelude::*; use codec::{Encode, Decode}; -use sp_runtime::KeyTypeId; -use sp_runtime::traits::{Convert, OpaqueKeys, Hash as HashT}; +use sp_runtime::{KeyTypeId, RuntimeDebug}; +use sp_runtime::traits::{Convert, OpaqueKeys}; use frame_support::{decl_module, decl_storage}; use frame_support::{Parameter, print}; use sp_trie::{MemoryDB, Trie, TrieMut, Recorder, EMPTY_PREFIX}; @@ -108,6 +108,7 @@ pub trait SessionManager: crate::SessionManager /// If there was a validator set change, its returns the set of new validators along with their /// full identifications. fn new_session(new_index: SessionIndex) -> Option>; + fn start_session(start_index: SessionIndex); fn end_session(end_index: SessionIndex); } @@ -146,19 +147,20 @@ impl crate::SessionManager for NoteHistoricalRoot>::start_session(start_index) + } fn end_session(end_index: SessionIndex) { >::end_session(end_index) } } -type HasherOf = <::Hashing as HashT>::Hasher; - /// A tuple of the validator's ID and their full identification. pub type IdentificationTuple = (::ValidatorId, ::FullIdentification); /// a trie instance for checking and generating proofs. pub struct ProvingTrie { - db: MemoryDB>, + db: MemoryDB, root: T::Hash, } @@ -256,12 +258,19 @@ impl ProvingTrie { } /// Proof of ownership of a specific key. 
-#[derive(Encode, Decode, Clone)] +#[derive(Encode, Decode, Clone, Eq, PartialEq, RuntimeDebug)] pub struct Proof { session: SessionIndex, trie_nodes: Vec>, } +impl Proof { + /// Returns a session this proof was generated for. + pub fn session(&self) -> SessionIndex { + self.session + } +} + impl> frame_support::traits::KeyOwnerProofSystem<(KeyTypeId, D)> for Module { @@ -318,7 +327,7 @@ mod tests { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); crate::GenesisConfig:: { keys: NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, UintAuthorityId(i).into())).collect() + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() ), }.assimilate_storage(&mut t).unwrap(); sp_io::TestExternalities::new(t) diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index 0abe06527b39114594010963168704f4393380fb..1097cfd6be2e425267fd3863f24dd94436185007 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -105,8 +105,9 @@ use sp_runtime::{KeyTypeId, Perbill, RuntimeAppPublic, BoundToRuntimeAppPublic}; use frame_support::weights::SimpleDispatchInfo; use sp_runtime::traits::{Convert, Zero, Member, OpaqueKeys}; use sp_staking::SessionIndex; -use frame_support::{dispatch, ConsensusEngineId, decl_module, decl_event, decl_storage, decl_error}; -use frame_support::{ensure, traits::{OnReapAccount, Get, FindAuthor, ValidatorRegistration}, Parameter}; +use frame_support::{ensure, decl_module, decl_event, decl_storage, decl_error, ConsensusEngineId}; +use frame_support::{traits::{Get, FindAuthor, ValidatorRegistration}, Parameter}; +use frame_support::dispatch::{self, DispatchResult, DispatchError}; use frame_system::{self as system, ensure_signed}; #[cfg(test)] @@ -161,10 +162,15 @@ pub trait SessionManager { /// Because the session pallet can queue validator set the ending session can be lower than the /// last new session index. fn end_session(end_index: SessionIndex); + /// Start the session. + /// + /// The session start to be used for validation + fn start_session(start_index: SessionIndex); } impl SessionManager for () { fn new_session(_: SessionIndex) -> Option> { None } + fn start_session(_: SessionIndex) {} fn end_session(_: SessionIndex) {} } @@ -361,6 +367,7 @@ decl_storage! { /// /// The first key is always `DEDUP_KEY_PREFIX` to have all the data in the same branch of /// the trie. Having all data in the same branch should prevent slowing down other queries. + // TODO: Migrate to a normal map now https://github.com/paritytech/substrate/issues/4917 NextKeys: double_map hasher(twox_64_concat) Vec, hasher(blake2_256) T::ValidatorId => Option; @@ -368,11 +375,12 @@ decl_storage! { /// /// The first key is always `DEDUP_KEY_PREFIX` to have all the data in the same branch of /// the trie. Having all data in the same branch should prevent slowing down other queries. + // TODO: Migrate to a normal map now https://github.com/paritytech/substrate/issues/4917 KeyOwner: double_map hasher(twox_64_concat) Vec, hasher(blake2_256) (KeyTypeId, Vec) => Option; } add_extra_genesis { - config(keys): Vec<(T::ValidatorId, T::Keys)>; + config(keys): Vec<(T::AccountId, T::ValidatorId, T::Keys)>; build(|config: &GenesisConfig| { if T::SessionHandler::KEY_TYPE_IDS.len() != T::Keys::key_ids().len() { panic!("Number of keys in session handler and session keys does not match"); @@ -388,21 +396,17 @@ decl_storage! 
{ } }); - for (who, keys) in config.keys.iter().cloned() { - assert!( - >::load_keys(&who).is_none(), - "genesis config contained duplicate validator {:?}", who, - ); - - >::do_set_keys(&who, keys) + for (account, val, keys) in config.keys.iter().cloned() { + >::inner_set_keys(&val, keys) .expect("genesis config must not contain duplicates; qed"); + system::Module::::inc_ref(&account); } let initial_validators_0 = T::SessionManager::new_session(0) .unwrap_or_else(|| { frame_support::print("No initial validator provided by `SessionManager`, use \ session config keys to generate initial validator set."); - config.keys.iter().map(|(ref v, _)| v.clone()).collect() + config.keys.iter().map(|x| x.1.clone()).collect() }); assert!(!initial_validators_0.is_empty(), "Empty validator set for session 0 in genesis block!"); @@ -424,6 +428,8 @@ decl_storage! { >::put(initial_validators_0); >::put(queued_keys); + + T::SessionManager::start_session(0); }); } } @@ -445,6 +451,8 @@ decl_error! { NoAssociatedValidatorId, /// Registered duplicate key. DuplicatedKey, + /// No keys are associated with this account. + NoKeys, } } @@ -467,6 +475,8 @@ decl_module! { /// # /// - O(log n) in number of accounts. /// - One extra DB entry. + /// - Increases system account refs by one on success iff there were previously no keys set. + /// In this case, purge_keys will need to be called before the account can be removed. /// # #[weight = SimpleDispatchInfo::FixedNormal(150_000)] fn set_keys(origin, keys: T::Keys, proof: Vec) -> dispatch::DispatchResult { @@ -474,13 +484,27 @@ decl_module! { ensure!(keys.ownership_proof_is_valid(&proof), Error::::InvalidProof); - let who = T::ValidatorIdOf::convert(who).ok_or(Error::::NoAssociatedValidatorId)?; - Self::do_set_keys(&who, keys)?; Ok(()) } + /// Removes any session key(s) of the function caller. + /// This doesn't take effect until the next session. + /// + /// The dispatch origin of this function must be signed. + /// + /// # + /// - O(N) in number of key types. + /// - Removes N + 1 DB entries. + /// - Reduces system account refs by one on success. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(150_000)] + fn purge_keys(origin) { + let who = ensure_signed(origin)?; + Self::do_purge_keys(&who)?; + } + /// Called when a block is initialized. Will rotate session if it is the last /// block of the current session. fn on_initialize(n: T::BlockNumber) { @@ -503,6 +527,8 @@ impl Module { // Inform the session handlers that a session is going to end. T::SessionHandler::on_before_session_ending(); + T::SessionManager::end_session(session_index); + // Get queued session keys and validators. let session_keys = >::get(); let validators = session_keys.iter() @@ -515,12 +541,12 @@ impl Module { DisabledValidators::take(); } - T::SessionManager::end_session(session_index); - // Increment session index. let session_index = session_index + 1; CurrentIndex::put(session_index); + T::SessionManager::start_session(session_index); + // Get next validator set. let maybe_next_validators = T::SessionManager::new_session(session_index + 1); let (next_validators, next_identities_changed) @@ -612,10 +638,30 @@ impl Module { Self::validators().iter().position(|i| i == c).map(Self::disable_index).ok_or(()) } - // perform the set_key operation, checking for duplicates. - // does not set `Changed`. - fn do_set_keys(who: &T::ValidatorId, keys: T::Keys) -> dispatch::DispatchResult { - let old_keys = Self::load_keys(&who); + /// Perform the set_key operation, checking for duplicates. 
Does not set `Changed`. + /// + /// This ensures that the reference counter in system is incremented appropriately and as such + /// must accept an account ID, rather than a validator ID. + fn do_set_keys(account: &T::AccountId, keys: T::Keys) -> dispatch::DispatchResult { + let who = T::ValidatorIdOf::convert(account.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + + let old_keys = Self::inner_set_keys(&who, keys)?; + if old_keys.is_none() { + system::Module::::inc_ref(&account); + } + + Ok(()) + } + + /// Perform the set_key operation, checking for duplicates. Does not set `Changed`. + /// + /// The old keys for this validator are returned, or `None` if there were none. + /// + /// This does not ensure that the reference counter in system is incremented appropriately, it + /// must be done by the caller or the keys will be leaked in storage. + fn inner_set_keys(who: &T::ValidatorId, keys: T::Keys) -> Result, DispatchError> { + let old_keys = Self::load_keys(who); for id in T::Keys::key_ids() { let key = keys.get_raw(*id); @@ -634,21 +680,25 @@ impl Module { Self::clear_key_owner(*id, old); } - Self::put_key_owner(*id, key, &who); + Self::put_key_owner(*id, key, who); } - Self::put_keys(&who, &keys); - - Ok(()) + Self::put_keys(who, &keys); + Ok(old_keys) } - fn prune_dead_keys(who: &T::ValidatorId) { - if let Some(old_keys) = Self::take_keys(who) { - for id in T::Keys::key_ids() { - let key_data = old_keys.get_raw(*id); - Self::clear_key_owner(*id, key_data); - } + fn do_purge_keys(account: &T::AccountId) -> DispatchResult { + let who = T::ValidatorIdOf::convert(account.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + + let old_keys = Self::take_keys(&who).ok_or(Error::::NoKeys)?; + for id in T::Keys::key_ids() { + let key_data = old_keys.get_raw(*id); + Self::clear_key_owner(*id, key_data); } + system::Module::::dec_ref(&account); + + Ok(()) } fn load_keys(v: &T::ValidatorId) -> Option { @@ -676,12 +726,6 @@ impl Module { } } -impl OnReapAccount for Module { - fn on_reap_account(who: &T::ValidatorId) { - Self::prune_dead_keys(who); - } -} - /// Wraps the author-scraping logic for consensus engines that can recover /// the canonical index of an author. This then transforms it into the /// registering account-ID of that session key index. 
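With this split, `do_set_keys` only increments the account's system reference count when no keys were stored before (i.e. when `inner_set_keys` returns `None` for the old keys), and `purge_keys` releases that single reference, so repeated `set_keys` calls cannot leak references. A self-contained toy of that discipline, with a HashMap and a plain counter standing in for `NextKeys` and the system refcount (illustrative only, not the pallet code):

use std::collections::HashMap;

type AccountId = u64;
type Keys = Vec<u8>;

#[derive(Default)]
struct Toy {
    next_keys: HashMap<AccountId, Keys>, // stands in for the `NextKeys` storage
    refs: HashMap<AccountId, u32>,       // stands in for the per-account refcount
}

impl Toy {
    fn set_keys(&mut self, who: AccountId, keys: Keys) {
        let old = self.next_keys.insert(who, keys);
        if old.is_none() {
            // Only the first registration pins the account; replacing keys does not.
            *self.refs.entry(who).or_default() += 1;
        }
    }

    fn purge_keys(&mut self, who: AccountId) -> Result<(), &'static str> {
        self.next_keys.remove(&who).ok_or("NoKeys")?;
        *self.refs.entry(who).or_default() -= 1;
        Ok(())
    }
}

fn main() {
    let mut toy = Toy::default();
    toy.set_keys(1, vec![0xAA]);
    toy.set_keys(1, vec![0xBB]);          // second call replaces keys, adds no extra ref
    assert_eq!(toy.refs[&1], 1);
    toy.purge_keys(1).unwrap();
    assert_eq!(toy.refs[&1], 0);          // nothing pins the account any more
    assert!(toy.purge_keys(1).is_err());  // no keys registered -> error, no underflow
}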
@@ -716,7 +760,7 @@ mod tests { let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); GenesisConfig:: { keys: NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, UintAuthorityId(i).into())).collect() + l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() ), }.assimilate_storage(&mut t).unwrap(); sp_io::TestExternalities::new(t) @@ -754,7 +798,10 @@ mod tests { let id = DUMMY; assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); - Session::on_reap_account(&1); + assert!(!System::allow_death(&1)); + assert_ok!(Session::purge_keys(Origin::signed(1))); + assert!(System::allow_death(&1)); + assert_eq!(Session::load_keys(&1), None); assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), None); }) diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 0c922670697aa1856b65529571e9e4c3009ea5b4..9d64285b900f8645d0f2d76cbe736bc7a908194f 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -92,6 +92,7 @@ impl SessionHandler for TestSessionHandler { pub struct TestSessionManager; impl SessionManager for TestSessionManager { fn end_session(_: SessionIndex) {} + fn start_session(_: SessionIndex) {} fn new_session(_: SessionIndex) -> Option> { if !TEST_SESSION_CHANGED.with(|l| *l.borrow()) { VALIDATORS.with(|v| { @@ -112,6 +113,7 @@ impl SessionManager for TestSessionManager { #[cfg(feature = "historical")] impl crate::historical::SessionManager for TestSessionManager { fn end_session(_: SessionIndex) {} + fn start_session(_: SessionIndex) {} fn new_session(new_index: SessionIndex) -> Option> { @@ -178,7 +180,7 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = Session; + type OnKilledAccount = (); } impl pallet_timestamp::Trait for Test { diff --git a/frame/society/Cargo.toml b/frame/society/Cargo.toml index ac140c971420b281c330a8072cd6f4c92abb3e21..35b1c5c4a45159464b783ac201955b81ebd0d260 100644 --- a/frame/society/Cargo.toml +++ b/frame/society/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-society" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME society pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-io ={ path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", 
default-features = false, path = "../system" } rand_chacha = { version = "0.2", default-features = false } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } [features] default = ["std"] diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index 62be1aa9f1da3cd424d090919633d07adcf93382..158f139df5673e9568e247122f40ed1d64c6b0c7 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -78,7 +78,7 @@ impl frame_system::Trait for Test { type Version = (); type ModuleToIndex = (); type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); type AccountData = pallet_balances::AccountData; } diff --git a/frame/staking/Cargo.toml b/frame/staking/Cargo.toml index a38cc0416b7273353856181b39a6b55ecf004d2e..ef4bb60a29e6a593d1c2f4560edbe6dd73537b43 100644 --- a/frame/staking/Cargo.toml +++ b/frame/staking/Cargo.toml @@ -1,30 +1,33 @@ [package] name = "pallet-staking" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet staking" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-phragmen = { version = "2.0.0", default-features = false, path = "../../primitives/phragmen" } -sp-io ={ path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-staking = { version = "2.0.0", default-features = false, path = "../../primitives/staking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-session = { version = "2.0.0", features = ["historical"], path = "../session", default-features = false } -pallet-authorship = { version = "2.0.0", default-features = false, path = "../authorship" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-keyring = { version = "2.0.0-alpha.2", optional = true, path = "../../primitives/keyring" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-phragmen = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/phragmen" } +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-staking = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/staking" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-session = { version = "2.0.0-alpha.2", features = ["historical"], path = "../session", default-features = false } +pallet-authorship = { 
version = "2.0.0-alpha.2", default-features = false, path = "../authorship" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } -pallet-timestamp = { version = "2.0.0", path = "../timestamp" } -pallet-staking-reward-curve = { version = "2.0.0", path = "../staking/reward-curve" } -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +pallet-timestamp = { version = "2.0.0-alpha.2", path = "../timestamp" } +pallet-staking-reward-curve = { version = "2.0.0-alpha.2", path = "../staking/reward-curve" } +substrate-test-utils = { version = "2.0.0-alpha.2", path = "../../test-utils" } [features] migrate = [] diff --git a/frame/staking/reward-curve/Cargo.toml b/frame/staking/reward-curve/Cargo.toml index 0753400596e1340c3d4d272694fa728cda0340f0..d55813682e62192a28d8c68992524fea41d8aa6d 100644 --- a/frame/staking/reward-curve/Cargo.toml +++ b/frame/staking/reward-curve/Cargo.toml @@ -1,18 +1,21 @@ [package] name = "pallet-staking-reward-curve" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Reward Curve for FRAME staking pallet" [lib] proc-macro = true [dependencies] syn = { version = "1.0.7", features = ["full", "visit"] } -quote = "1.0" +quote = "1.0.3" proc-macro2 = "1.0.6" proc-macro-crate = "0.1.4" [dev-dependencies] -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index 3f84597912f53921cd0790f72d0a3a1a6037409e..2dc377c55af9d3ad78b03e658ed9e71704ec1953 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -104,6 +104,11 @@ //! The **reward and slashing** procedure is the core of the Staking module, attempting to _embrace //! valid behavior_ while _punishing any misbehavior or lack of availability_. //! +//! Reward must be claimed by stakers for each era before it gets too old by $HISTORY_DEPTH using +//! `payout_nominator` and `payout_validator` calls. +//! Only the [`T::MaxNominatorRewardedPerValidator`] biggest stakers can claim their reward. This +//! limit the i/o cost to compute nominators payout. +//! //! Slashing can occur at any point in time, once misbehavior is reported. Once slashing is //! determined, a value is deducted from the balance of the validator and all the nominators who //! voted for this validator (values are deducted from the _stash_ account of the slashed entity). @@ -122,6 +127,11 @@ //! //! An account can step back via the [`chill`](enum.Call.html#variant.chill) call. //! +//! ### Session managing +//! +//! The module implement the trait `SessionManager`. Which is the only API to query new validator +//! set and allowing these validator set to be rewarded once their era is ended. +//! //! ## Interface //! //! ### Dispatchable Functions @@ -159,14 +169,6 @@ //! //! ## Implementation Details //! -//! ### Slot Stake -//! -//! The term [`SlotStake`](./struct.Module.html#method.slot_stake) will be used throughout this -//! section. It refers to a value calculated at the end of each era, containing the _minimum value -//! 
at stake among all validators._ Note that a validator's value at stake might be a combination -//! of the validator's own stake and the votes it received. See [`Exposure`](./struct.Exposure.html) -//! for more details. -//! //! ### Reward Calculation //! //! Validators and nominators are rewarded at the end of each era. The total reward of an era is @@ -236,6 +238,7 @@ //! ## GenesisConfig //! //! The Staking module depends on the [`GenesisConfig`](./struct.GenesisConfig.html). +//! The `GenesisConfig` is optional and allow to set some initial stakers. //! //! ## Related Modules //! @@ -251,14 +254,16 @@ mod mock; #[cfg(test)] mod tests; mod slashing; +mod migration; pub mod inflation; -use sp_std::{prelude::*, result}; +use sp_std::{prelude::*, result, collections::btree_map::BTreeMap}; use codec::{HasCompact, Encode, Decode}; use frame_support::{ decl_module, decl_event, decl_storage, ensure, decl_error, weights::SimpleDispatchInfo, + dispatch::DispatchResult, traits::{ Currency, LockIdentifier, LockableCurrency, WithdrawReasons, OnUnbalanced, Imbalance, Get, Time @@ -269,20 +274,19 @@ use sp_runtime::{ Perbill, PerThing, RuntimeDebug, curve::PiecewiseLinear, traits::{ - Convert, Zero, One, StaticLookup, CheckedSub, Saturating, Bounded, SaturatedConversion, + Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, AtLeast32Bit, EnsureOrigin, } }; use sp_staking::{ SessionIndex, - offence::{OnOffenceHandler, OffenceDetails, Offence, ReportOffence}, + offence::{OnOffenceHandler, OffenceDetails, Offence, ReportOffence, OffenceError}, }; #[cfg(feature = "std")] use sp_runtime::{Serialize, Deserialize}; use frame_system::{self as system, ensure_signed, ensure_root}; use sp_phragmen::ExtendedBalance; -use frame_support::traits::OnReapAccount; const DEFAULT_MINIMUM_VALIDATOR_COUNT: u32 = 4; const MAX_NOMINATIONS: usize = 16; @@ -293,28 +297,29 @@ const STAKING_ID: LockIdentifier = *b"staking "; pub type EraIndex = u32; /// Counter for the number of "reward" points earned by a given validator. -pub type Points = u32; +pub type RewardPoint = u32; + +/// Information regarding the active era (era in used in session). +#[derive(Encode, Decode, RuntimeDebug)] +pub struct ActiveEraInfo { + /// Index of era. + index: EraIndex, + /// Moment of start + /// + /// Start can be none if start hasn't been set for the era yet, + /// Start is set on the first on_finalize of the era to guarantee usage of `Time`. + start: Option, +} /// Reward points of an era. Used to split era total payout between validators. -#[derive(Encode, Decode, Default)] -pub struct EraPoints { +/// +/// This points will be used to reward validators and their respective nominators. +#[derive(PartialEq, Encode, Decode, Default, RuntimeDebug)] +pub struct EraRewardPoints { /// Total number of points. Equals the sum of reward points for each validator. - total: Points, - /// The reward points earned by a given validator. The index of this vec corresponds to the - /// index into the current validator set. - individual: Vec, -} - -impl EraPoints { - /// Add the reward to the validator at the given index. Index must be valid - /// (i.e. `index < current_elected.len()`). 
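The index-based `EraPoints` is replaced here by `EraRewardPoints`, which keys reward points directly by validator account in a `BTreeMap`, so no resizing or index bookkeeping against the elected set is needed. A standalone toy of the new shape (simplified types, illustrative only, not the pallet struct):

use std::collections::BTreeMap;

type AccountId = u64;
type RewardPoint = u32;

#[derive(Default)]
struct EraRewardPointsToy {
    total: RewardPoint,
    individual: BTreeMap<AccountId, RewardPoint>,
}

impl EraRewardPointsToy {
    fn add_points(&mut self, validator: AccountId, points: RewardPoint) {
        // Unknown validators simply start at zero; no index resizing needed.
        *self.individual.entry(validator).or_default() += points;
        self.total += points;
    }
}

fn main() {
    let mut era = EraRewardPointsToy::default();
    era.add_points(11, 20);
    era.add_points(11, 20);
    era.add_points(21, 10);
    assert_eq!(era.total, 50);
    assert_eq!(era.individual[&11], 40);
}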
- fn add_points_to_index(&mut self, index: u32, points: u32) { - if let Some(new_total) = self.total.checked_add(points) { - self.total = new_total; - self.individual.resize((index as usize + 1).max(self.individual.len()), 0); - self.individual[index as usize] += points; // Addition is less than total - } - } + total: RewardPoint, + /// The reward points earned by a given validator. + individual: BTreeMap, } /// Indicates the initial status of the staker. @@ -390,6 +395,8 @@ pub struct StakingLedger { /// Any balance that is becoming free, which may eventually be transferred out /// of the stash (assuming it doesn't get slashed first). pub unlocking: Vec>, + /// The latest and highest era which the staker has claimed reward for. + pub last_reward: Option, } impl< @@ -408,7 +415,14 @@ impl< false }) .collect(); - Self { total, active: self.active, stash: self.stash, unlocking } + + Self { + stash: self.stash, + total, + active: self.active, + unlocking, + last_reward: self.last_reward + } } /// Re-bond funds that were scheduled for unlocking. @@ -499,6 +513,8 @@ pub struct Nominations { /// The targets of nomination. pub targets: Vec, /// The era the nominations were submitted. + /// + /// Except for initial nominations which are considered submitted at era 0. pub submitted_in: EraIndex, /// Whether the nominations have been suppressed. pub suppressed: bool, @@ -595,6 +611,9 @@ pub trait Trait: frame_system::Trait { type Currency: LockableCurrency; /// Time used for computing era duration. + /// + /// It is guaranteed to start being called from the first `on_finalize`. Thus value at genesis + /// is not used. type Time: Time; /// Convert a balance into a number used for election calculation. @@ -635,6 +654,12 @@ pub trait Trait: frame_system::Trait { /// The NPoS reward curve to use. type RewardCurve: Get<&'static PiecewiseLinear<'static>>; + + /// The maximum number of nominator rewarded for each validator. + /// + /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim + /// their reward. This used to limit the i/o cost for the nominator payout. + type MaxNominatorRewardedPerValidator: Get; } /// Mode of era-forcing. @@ -655,11 +680,35 @@ impl Default for Forcing { fn default() -> Self { Forcing::NotForcing } } +// A value placed in storage that represents the current version of the Staking storage. +// This value is used by the `on_runtime_upgrade` logic to determine whether we run +// storage migration logic. This should match directly with the semantic versions of the Rust crate. +#[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] +enum Releases { + V1_0_0, + V2_0_0, +} + +impl Default for Releases { + fn default() -> Self { + Releases::V1_0_0 + } +} + decl_storage! { trait Store for Module as Staking { + /// Number of era to keep in history. + /// + /// Information is kept for eras in `[current_era - history_depth; current_era] + /// + /// Must be more than the number of era delayed by session otherwise. + /// i.e. active era must always be in history. + /// i.e. `active_era > current_era - history_depth` must be guaranteed. + HistoryDepth get(fn history_depth) config(): u32 = 84; /// The ideal number of staking participants. pub ValidatorCount get(fn validator_count) config(): u32; + /// Minimum number of staking participants before emergency conditions are imposed. pub MinimumValidatorCount get(fn minimum_validator_count) config(): u32 = DEFAULT_MINIMUM_VALIDATOR_COUNT; @@ -671,6 +720,7 @@ decl_storage! 
{ /// Map from all locked "stash" accounts to the controller account. pub Bonded get(fn bonded): map hasher(blake2_256) T::AccountId => Option; + /// Map from all (unlocked) "controller" accounts to the info regarding the staking. pub Ledger get(fn ledger): map hasher(blake2_256) T::AccountId @@ -684,40 +734,74 @@ decl_storage! { linked_map hasher(blake2_256) T::AccountId => ValidatorPrefs; /// The map from nominator stash key to the set of stash keys of all validators to nominate. - /// - /// NOTE: is private so that we can ensure upgraded before all typical accesses. - /// Direct storage APIs can still bypass this protection. - Nominators get(fn nominators): + pub Nominators get(fn nominators): linked_map hasher(blake2_256) T::AccountId => Option>; - /// Nominators for a particular account that is in action right now. You can't iterate - /// through validators here, but you can find them in the Session module. + /// The current era index. /// - /// This is keyed by the stash account. - pub Stakers get(fn stakers): - map hasher(blake2_256) T::AccountId => Exposure>; + /// This is the latest planned era, depending on how session module queues the validator + /// set, it might be active or not. + pub CurrentEra get(fn current_era): Option; - /// The currently elected validator set keyed by stash account ID. - pub CurrentElected get(fn current_elected): Vec; + /// The active era information, it holds index and start. + /// + /// The active era is the era currently rewarded. + /// Validator set of this era must be equal to `SessionInterface::validators`. + pub ActiveEra get(fn active_era): Option>>; - /// The current era index. - pub CurrentEra get(fn current_era) config(): EraIndex; + /// The session index at which the era start for the last `HISTORY_DEPTH` eras + pub ErasStartSessionIndex get(fn eras_start_session_index): + map hasher(blake2_256) EraIndex => Option; - /// The start of the current era. - pub CurrentEraStart get(fn current_era_start): MomentOf; + /// Exposure of validator at era. + /// + /// This is keyed first by the era index to allow bulk deletion and then the stash account. + /// + /// Is it removed after `HISTORY_DEPTH` eras. + /// If stakers hasn't been set or has been removed then empty exposure is returned. + pub ErasStakers get(fn eras_stakers): + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId + => Exposure>; - /// The session index at which the current era started. - pub CurrentEraStartSessionIndex get(fn current_era_start_session_index): SessionIndex; + /// Clipped Exposure of validator at era. + /// + /// This is similar to [`ErasStakers`] but number of nominators exposed is reduce to the + /// `T::MaxNominatorRewardedPerValidator` biggest stakers. + /// This is used to limit the i/o cost for the nominator payout. + /// + /// This is keyed fist by the era index to allow bulk deletion and then the stash account. + /// + /// Is it removed after `HISTORY_DEPTH` eras. + /// If stakers hasn't been set or has been removed then empty exposure is returned. + pub ErasStakersClipped get(fn eras_stakers_clipped): + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId + => Exposure>; - /// Rewards for the current era. Using indices of current elected set. - CurrentEraPointsEarned get(fn current_era_reward): EraPoints; + /// Similarly to `ErasStakers` this holds the preferences of validators. + /// + /// This is keyed fist by the era index to allow bulk deletion and then the stash account. 
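// [Editor's sketch, not part of the diff] Keying `ErasStakers`, `ErasStakersClipped` and
// `ErasValidatorPrefs` by era first is what makes the later `remove_prefix(era_index)` cleanup
// cheap: one era forms a contiguous key range. A std-only approximation of that layout, with a
// hypothetical `AccountId = u64` and the exposure reduced to a plain balance:
use std::collections::BTreeMap;

type EraIndex = u32;
type AccountId = u64;

#[derive(Default)]
struct ErasStakersModel {
    map: BTreeMap<(EraIndex, AccountId), u128>,
}

impl ErasStakersModel {
    fn insert(&mut self, era: EraIndex, who: AccountId, exposure_total: u128) {
        self.map.insert((era, who), exposure_total);
    }

    // Rough analogue of `remove_prefix(era)`: drop every entry of one era in a single sweep.
    fn clear_era(&mut self, era: EraIndex) {
        self.map.retain(|&(e, _), _| e != era);
    }
}

fn main() {
    let mut stakers = ErasStakersModel::default();
    stakers.insert(84, 11, 1_000);
    stakers.insert(84, 21, 2_000);
    stakers.insert(85, 11, 1_500);
    stakers.clear_era(84);
    assert_eq!(stakers.map.len(), 1);
}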
+ /// + /// Is it removed after `HISTORY_DEPTH` eras. + // If prefs hasn't been set or has been removed then 0 commission is returned. + pub ErasValidatorPrefs get(fn eras_validator_prefs): + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId + => ValidatorPrefs; - /// The amount of balance actively at stake for each validator slot, currently. + /// The total validator era payout for the last `HISTORY_DEPTH` eras. /// - /// This is used to derive rewards and punishments. - pub SlotStake get(fn slot_stake) build(|config: &GenesisConfig| { - config.stakers.iter().map(|&(_, _, value, _)| value).min().unwrap_or_default() - }): BalanceOf; + /// Eras that haven't finished yet or has been removed doesn't have reward. + pub ErasValidatorReward get(fn eras_validator_reward): + map hasher(blake2_256) EraIndex => Option>; + + /// Rewards for the last `HISTORY_DEPTH` eras. + /// If reward hasn't been set or has been removed then 0 reward is returned. + pub ErasRewardPoints get(fn eras_reward_points): + map hasher(blake2_256) EraIndex => EraRewardPoints; + + /// The total amount staked for the last `HISTORY_DEPTH` eras. + /// If total hasn't been set or has been removed then 0 stake is returned. + pub ErasTotalStake get(fn eras_total_stake): + map hasher(blake2_256) EraIndex => BalanceOf; /// True if the next session change will be a new era regardless of index. pub ForceEra get(fn force_era) config(): Forcing; @@ -736,6 +820,9 @@ decl_storage! { map hasher(blake2_256) EraIndex => Vec>>; /// A mapping from still-bonded eras to the first session index of that era. + /// + /// Must contains information for eras for the range: + /// `[active_era - bounding_duration; active_era]` BondedEras: Vec<(EraIndex, SessionIndex)>; /// All slashing events on validators, mapped by era to the highest slash proportion @@ -760,6 +847,11 @@ decl_storage! { /// The earliest era for which we have a pending, unapplied slash. EarliestUnappliedSlash: Option; + + /// Storage version of the pallet. + /// + /// This is set to v2.0.0 for new networks. + StorageVersion build(|_: &GenesisConfig| Releases::V2_0_0): Releases; } add_extra_genesis { config(stakers): @@ -797,9 +889,8 @@ decl_storage! { decl_event!( pub enum Event where Balance = BalanceOf, ::AccountId { - /// All validators have been rewarded by the first balance; the second is the remainder - /// from the maximum amount of reward. - Reward(Balance, Balance), + /// The staker has been rewarded by this amount. AccountId is controller account. + Reward(AccountId, Balance), /// One validator (and its nominators) has been slashed by the given amount. Slash(AccountId, Balance), /// An old slashing report from a prior era was discarded because it could @@ -831,6 +922,12 @@ decl_error! { NoMoreChunks, /// Can not rebond without unlocking chunks. NoUnlockChunk, + /// Attempting to target a stash that still has funds. + FundedTarget, + /// Invalid era to reward. + InvalidEraToReward, + /// Invalid number of nominations. + InvalidNumberOfNominations, } } @@ -846,14 +943,17 @@ decl_module! { fn deposit_event() = default; - fn on_initialize() { - Self::ensure_storage_upgraded(); + fn on_runtime_upgrade() { + migration::on_runtime_upgrade::(); } fn on_finalize() { // Set the start of the first era. - if !>::exists() { - >::put(T::Time::now()); + if let Some(mut active_era) = Self::active_era() { + if active_era.start.is_none() { + active_era.start = Some(T::Time::now()); + >::put(active_era); + } } } @@ -900,9 +1000,17 @@ decl_module! 
{ >::insert(&stash, &controller); >::insert(&stash, payee); + system::Module::::inc_ref(&stash); + let stash_balance = T::Currency::free_balance(&stash); let value = value.min(stash_balance); - let item = StakingLedger { stash, total: value, active: value, unlocking: vec![] }; + let item = StakingLedger { + stash, + total: value, + active: value, + unlocking: vec![], + last_reward: Self::current_era(), + }; Self::update_ledger(&controller, &item); } @@ -957,7 +1065,8 @@ decl_module! { /// - Contains a limited number of reads. /// - Each call (requires the remainder of the bonded balance to be above `minimum_balance`) /// will cause a new entry to be inserted into a vector (`Ledger.unlocking`) kept in storage. - /// The only way to clean the aforementioned storage item is also user-controlled via `withdraw_unbonded`. + /// The only way to clean the aforementioned storage item is also user-controlled via + /// `withdraw_unbonded`. /// - One DB entry. /// #[weight = SimpleDispatchInfo::FixedNormal(400_000)] @@ -980,7 +1089,8 @@ decl_module! { ledger.active = Zero::zero(); } - let era = Self::current_era() + T::BondingDuration::get(); + // Note: in case there is no current era it is fine to bond one era more. + let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); ledger.unlocking.push(UnlockChunk { value, era }); Self::update_ledger(&controller, &ledger); } @@ -1005,18 +1115,20 @@ decl_module! { #[weight = SimpleDispatchInfo::FixedNormal(400_000)] fn withdraw_unbonded(origin) { let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let ledger = ledger.consolidate_unlocked(Self::current_era()); + let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + if let Some(current_era) = Self::current_era() { + ledger = ledger.consolidate_unlocked(current_era) + } if ledger.unlocking.is_empty() && ledger.active.is_zero() { // This account must have called `unbond()` with some value that caused the active // portion to fall below existential deposit + will have no more unlocking chunks // left. We can now safely remove this. let stash = ledger.stash; + // remove all staking-related information. + Self::kill_stash(&stash)?; // remove the lock. T::Currency::remove_lock(STAKING_ID, &stash); - // remove all staking-related information. - Self::kill_stash(&stash); } else { // This was the consequence of a partial unbond. just update the ledger and move on. Self::update_ledger(&controller, &ledger); @@ -1036,8 +1148,6 @@ decl_module! { /// # #[weight = SimpleDispatchInfo::FixedNormal(750_000)] fn validate(origin, prefs: ValidatorPrefs) { - Self::ensure_storage_upgraded(); - let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; @@ -1058,8 +1168,6 @@ decl_module! { /// # #[weight = SimpleDispatchInfo::FixedNormal(750_000)] fn nominate(origin, targets: Vec<::Source>) { - Self::ensure_storage_upgraded(); - let controller = ensure_signed(origin)?; let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; let stash = &ledger.stash; @@ -1071,7 +1179,8 @@ decl_module! { let nominations = Nominations { targets, - submitted_in: Self::current_era(), + // initial nominations are considered submitted at era 0. See `Nominations` doc + submitted_in: Self::current_era().unwrap_or(0), suppressed: false, }; @@ -1187,10 +1296,11 @@ decl_module! 
{ fn force_unstake(origin, stash: T::AccountId) { ensure_root(origin)?; + // remove all staking-related information. + Self::kill_stash(&stash)?; + // remove the lock. T::Currency::remove_lock(STAKING_ID, &stash); - // remove all staking-related information. - Self::kill_stash(&stash); } /// Force there to be a new era at the end of sessions indefinitely. @@ -1238,6 +1348,58 @@ decl_module! { ::UnappliedSlashes::insert(&era, &unapplied); } + /// Make one nominator's payout for one era. + /// + /// - `who` is the controller account of the nominator to pay out. + /// - `era` may not be lower than one following the most recently paid era. If it is higher, + /// then it indicates an instruction to skip the payout of all previous eras. + /// - `validators` is the list of all validators that `who` had exposure to during `era`. + /// If it is incomplete, then less than the full reward will be paid out. + /// It must not exceed `MAX_NOMINATIONS`. + /// + /// WARNING: once an era is payed for a validator such validator can't claim the payout of + /// previous era. + /// + /// WARNING: Incorrect arguments here can result in loss of payout. Be very careful. + /// + /// # + /// - Number of storage read of `O(validators)`; `validators` is the argument of the call, + /// and is bounded by `MAX_NOMINATIONS`. + /// - Each storage read is `O(N)` size and decode complexity; `N` is the maximum + /// nominations that can be given to a single validator. + /// - Computation complexity: `O(MAX_NOMINATIONS * logN)`; `MAX_NOMINATIONS` is the + /// maximum number of validators that may be nominated by a single nominator, it is + /// bounded only economically (all nominators are required to place a minimum stake). + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000)] + fn payout_nominator(origin, era: EraIndex, validators: Vec<(T::AccountId, u32)>) + -> DispatchResult + { + let who = ensure_signed(origin)?; + Self::do_payout_nominator(who, era, validators) + } + + /// Make one validator's payout for one era. + /// + /// - `who` is the controller account of the validator to pay out. + /// - `era` may not be lower than one following the most recently paid era. If it is higher, + /// then it indicates an instruction to skip the payout of all previous eras. + /// + /// WARNING: once an era is payed for a validator such validator can't claim the payout of + /// previous era. + /// + /// WARNING: Incorrect arguments here can result in loss of payout. Be very careful. + /// + /// # + /// - Time complexity: O(1). + /// - Contains a limited number of reads and writes. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000)] + fn payout_validator(origin, era: EraIndex) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::do_payout_validator(who, era) + } + /// Rebond a portion of the stash scheduled to be unlocked. /// /// # @@ -1254,9 +1416,39 @@ decl_module! { ); let ledger = ledger.rebond(value); - Self::update_ledger(&controller, &ledger); } + + /// Set history_depth value. + /// + /// Origin must be root. 
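// [Editor's sketch, not part of the diff] Both payout calls above refuse an era that the ledger
// has already claimed or skipped past, which is what the two WARNINGs describe. A std-only model
// of that monotonic `last_reward` guard (error type simplified to a string):
type EraIndex = u32;

struct LedgerModel {
    last_reward: Option<EraIndex>,
}

impl LedgerModel {
    fn claim(&mut self, era: EraIndex, oldest_kept_era: EraIndex) -> Result<(), &'static str> {
        if era < oldest_kept_era {
            return Err("InvalidEraToReward: pruned from history");
        }
        if self.last_reward.map(|last| last >= era).unwrap_or(false) {
            return Err("InvalidEraToReward: already claimed or skipped");
        }
        // Claiming era N forfeits every era older than N.
        self.last_reward = Some(era);
        Ok(())
    }
}

fn main() {
    let mut ledger = LedgerModel { last_reward: None };
    assert!(ledger.claim(10, 5).is_ok());
    assert!(ledger.claim(9, 5).is_err()); // an older era can no longer be paid out
    assert!(ledger.claim(11, 5).is_ok());
}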
+ #[weight = SimpleDispatchInfo::FixedOperational(500_000)] + fn set_history_depth(origin, #[compact] new_history_depth: EraIndex) { + ensure_root(origin)?; + if let Some(current_era) = Self::current_era() { + HistoryDepth::mutate(|history_depth| { + let last_kept = current_era.checked_sub(*history_depth).unwrap_or(0); + let new_last_kept = current_era.checked_sub(new_history_depth).unwrap_or(0); + for era_index in last_kept..new_last_kept { + Self::clear_era_information(era_index); + } + *history_depth = new_history_depth + }) + } + } + + /// Remove all data structure concerning a staker/stash once its balance is zero. + /// This is essentially equivalent to `withdraw_unbonded` except it can be called by anyone + /// and the target `stash` must have no funds left. + /// + /// This can be called from any origin. + /// + /// - `stash`: The stash account to reap. Its balance must be zero. + fn reap_stash(_origin, stash: T::AccountId) { + ensure!(T::Currency::total_balance(&stash).is_zero(), Error::::FundedTarget); + Self::kill_stash(&stash)?; + T::Currency::remove_lock(STAKING_ID, &stash); + } } } @@ -1270,6 +1462,111 @@ impl Module { // MUTABLES (DANGEROUS) + fn do_payout_nominator(who: T::AccountId, era: EraIndex, validators: Vec<(T::AccountId, u32)>) + -> DispatchResult + { + // validators len must not exceed `MAX_NOMINATIONS` to avoid querying more validator + // exposure than necessary. + if validators.len() > MAX_NOMINATIONS { + return Err(Error::::InvalidNumberOfNominations.into()); + } + + // Note: if era has no reward to be claimed, era may be future. better not to update + // `nominator_ledger.last_reward` in this case. + let era_payout = >::get(&era) + .ok_or_else(|| Error::::InvalidEraToReward)?; + + let mut nominator_ledger = >::get(&who).ok_or_else(|| Error::::NotController)?; + + if nominator_ledger.last_reward.map(|last_reward| last_reward >= era).unwrap_or(false) { + return Err(Error::::InvalidEraToReward.into()); + } + + nominator_ledger.last_reward = Some(era); + >::insert(&who, &nominator_ledger); + + let mut reward = Perbill::zero(); + let era_reward_points = >::get(&era); + + for (validator, nominator_index) in validators.into_iter() { + let commission = Self::eras_validator_prefs(&era, &validator).commission; + let validator_exposure = >::get(&era, &validator); + + if let Some(nominator_exposure) = validator_exposure.others + .get(nominator_index as usize) + { + if nominator_exposure.who != nominator_ledger.stash { + continue; + } + + let nominator_exposure_part = Perbill::from_rational_approximation( + nominator_exposure.value, + validator_exposure.total, + ); + let validator_point = era_reward_points.individual.get(&validator) + .map(|points| *points) + .unwrap_or_else(|| Zero::zero()); + let validator_point_part = Perbill::from_rational_approximation( + validator_point, + era_reward_points.total, + ); + reward = reward.saturating_add( + validator_point_part + .saturating_mul(Perbill::one().saturating_sub(commission)) + .saturating_mul(nominator_exposure_part) + ); + } + } + + if let Some(imbalance) = Self::make_payout(&nominator_ledger.stash, reward * era_payout) { + Self::deposit_event(RawEvent::Reward(who, imbalance.peek())); + } + + Ok(()) + } + + fn do_payout_validator(who: T::AccountId, era: EraIndex) -> DispatchResult { + // Note: if era has no reward to be claimed, era may be future. better not to update + // `ledger.last_reward` in this case. 
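// [Editor's sketch, not part of the diff] The fraction `do_payout_nominator` pays out is, per
// validator the nominator backed:
//   (validator_points / era_total_points) * (1 - commission) * (nominator_stake / validator_total_stake)
// summed over the listed validators and then applied to the era payout. Using f64 here instead of
// `Perbill`, purely for readability:
fn nominator_share(validators: &[(u32, f64, u128, u128)], era_total_points: u32) -> f64 {
    // Each entry: (validator points, commission, validator total exposure, this nominator's part).
    validators.iter().fold(0.0, |acc, &(points, commission, total, mine)| {
        let point_part = points as f64 / era_total_points as f64;
        let exposure_part = mine as f64 / total as f64;
        acc + point_part * (1.0 - commission) * exposure_part
    })
}

fn main() {
    // One validator with 40 of 100 era points, 10% commission; we provided 250 of its 1000 stake.
    let share = nominator_share(&[(40, 0.10, 1_000, 250)], 100);
    let era_payout = 1_000_000u128;
    println!("payout is roughly {}", (share * era_payout as f64) as u128); // about 90_000
}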
+ let era_payout = >::get(&era) + .ok_or_else(|| Error::::InvalidEraToReward)?; + + let mut ledger = >::get(&who).ok_or_else(|| Error::::NotController)?; + if ledger.last_reward.map(|last_reward| last_reward >= era).unwrap_or(false) { + return Err(Error::::InvalidEraToReward.into()); + } + + ledger.last_reward = Some(era); + >::insert(&who, &ledger); + + let era_reward_points = >::get(&era); + let commission = Self::eras_validator_prefs(&era, &ledger.stash).commission; + let exposure = >::get(&era, &ledger.stash); + + let exposure_part = Perbill::from_rational_approximation( + exposure.own, + exposure.total, + ); + let validator_point = era_reward_points.individual.get(&ledger.stash) + .map(|points| *points) + .unwrap_or_else(|| Zero::zero()); + let validator_point_part = Perbill::from_rational_approximation( + validator_point, + era_reward_points.total, + ); + let reward = validator_point_part.saturating_mul( + commission.saturating_add( + Perbill::one().saturating_sub(commission).saturating_mul(exposure_part) + ) + ); + + if let Some(imbalance) = Self::make_payout(&ledger.stash, reward * era_payout) { + Self::deposit_event(RawEvent::Reward(who, imbalance.peek())); + } + + Ok(()) + } + /// Update the ledger for a controller. This will also update the stash lock. The lock will /// will lock the entire funds except paying for further transactions. fn update_ledger( @@ -1291,12 +1588,6 @@ impl Module { >::remove(stash); } - /// Ensures storage is upgraded to most recent necessary state. - /// - /// Right now it's a no-op as all networks that are supported by Substrate Frame Core are - /// running with the latest staking storage scheme. - fn ensure_storage_upgraded() {} - /// Actually make a payment to a staker. This uses the currency's reward function /// to pay the right payee for the given staker account. fn make_payout(stash: &T::AccountId, amount: BalanceOf) -> Option> { @@ -1320,113 +1611,90 @@ impl Module { } } - /// Reward a given validator by a specific amount. Add the reward to the validator's, and its - /// nominators' balance, pro-rata based on their exposure, after having removed the validator's - /// pre-payout cut. - fn reward_validator(stash: &T::AccountId, reward: BalanceOf) -> PositiveImbalanceOf { - let off_the_table = Self::validators(stash).commission * reward; - let reward = reward.saturating_sub(off_the_table); - let mut imbalance = >::zero(); - let validator_cut = if reward.is_zero() { - Zero::zero() - } else { - let exposure = Self::stakers(stash); - let total = exposure.total.max(One::one()); - - for i in &exposure.others { - let per_u64 = Perbill::from_rational_approximation(i.value, total); - imbalance.maybe_subsume(Self::make_payout(&i.who, per_u64 * reward)); + /// Plan a new session potentially trigger a new era. + fn new_session(session_index: SessionIndex) -> Option> { + if let Some(current_era) = Self::current_era() { + // Initial era has been set. + + let current_era_start_session_index = Self::eras_start_session_index(current_era) + .unwrap_or_else(|| { + frame_support::print("Error: start_session_index must be set for current_era"); + 0 + }); + + let era_length = session_index.checked_sub(current_era_start_session_index) + .unwrap_or(0); // Must never happen. 
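// [Editor's sketch, not part of the diff] `do_payout_validator` above pays the validator its
// commission on the whole exposure plus its own-stake share of the remainder:
//   (validator_points / era_total_points) * (commission + (1 - commission) * own / total)
// Again with f64 standing in for `Perbill`, just to make the arithmetic easy to follow:
fn validator_share(points: u32, era_total_points: u32, commission: f64, own: u128, total: u128) -> f64 {
    let point_part = points as f64 / era_total_points as f64;
    let own_part = own as f64 / total as f64;
    point_part * (commission + (1.0 - commission) * own_part)
}

fn main() {
    // 40 of 100 points, 10% commission, self-bonded 200 out of a total exposure of 1000.
    let share = validator_share(40, 100, 0.10, 200, 1_000);
    assert!((share - 0.112).abs() < 1e-9); // 0.4 * (0.1 + 0.9 * 0.2)
}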
+ + match ForceEra::get() { + Forcing::ForceNew => ForceEra::kill(), + Forcing::ForceAlways => (), + Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), + _ => return None, } - let per_u64 = Perbill::from_rational_approximation(exposure.own, total); - per_u64 * reward - }; - - imbalance.maybe_subsume(Self::make_payout(stash, validator_cut + off_the_table)); - - imbalance - } - - /// Session has just ended. Provide the validator set for the next session if it's an era-end. - fn new_session(session_index: SessionIndex) -> Option> { - let era_length = session_index.checked_sub(Self::current_era_start_session_index()).unwrap_or(0); - match ForceEra::get() { - Forcing::ForceNew => ForceEra::kill(), - Forcing::ForceAlways => (), - Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), - _ => return None, + Self::new_era(session_index) + } else { + // Set initial era + Self::new_era(session_index) } - - Self::new_era(session_index) } - /// Initialize the first session (and consequently the first era) - fn initial_session() -> Option> { - // note: `CurrentEraStart` is set in `on_finalize` of the first block because now is not - // available yet. - CurrentEraStartSessionIndex::put(0); - BondedEras::mutate(|bonded| bonded.push((0, 0))); - Self::select_validators().1 + /// Start a session potentially starting an era. + fn start_session(start_session: SessionIndex) { + let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); + if let Some(next_active_era_start_session_index) = + Self::eras_start_session_index(next_active_era) + { + if next_active_era_start_session_index == start_session { + Self::start_era(start_session); + } else if next_active_era_start_session_index < start_session { + // This arm should never happen, but better handle it than to stall the + // staking pallet. + frame_support::print("Warning: A session appears to have been skipped."); + Self::start_era(start_session); + } + } } - /// The era has changed - enact new staking set. - /// - /// NOTE: This always happens immediately before a session change to ensure that new validators - /// get a chance to set their session keys. - fn new_era(start_session_index: SessionIndex) -> Option> { - // Payout - let points = CurrentEraPointsEarned::take(); - let now = T::Time::now(); - let previous_era_start = >::mutate(|v| { - sp_std::mem::replace(v, now) - }); - let era_duration = now - previous_era_start; - if !era_duration.is_zero() { - let validators = Self::current_elected(); - - let validator_len: BalanceOf = (validators.len() as u32).into(); - let total_rewarded_stake = Self::slot_stake() * validator_len; - - let (total_payout, max_payout) = inflation::compute_total_payout( - &T::RewardCurve::get(), - total_rewarded_stake.clone(), - T::Currency::total_issuance(), - // Duration of era; more than u64::MAX is rewarded as u64::MAX. - era_duration.saturated_into::(), - ); - - let mut total_imbalance = >::zero(); + /// End a session potentially ending an era. 
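// [Editor's sketch, not part of the diff] Whether planning a session also plans a new era follows
// the `Forcing` mode and how many sessions the current era has lasted, as in the `match` above
// (the `_` arm there covers the remaining `Forcing` variants such as `ForceNone`). A std-only
// model of that decision:
#[derive(Clone, Copy)]
enum Forcing { NotForcing, ForceNew, ForceNone, ForceAlways }

fn should_plan_new_era(forcing: Forcing, era_length: u32, sessions_per_era: u32) -> bool {
    match forcing {
        Forcing::ForceNew | Forcing::ForceAlways => true,
        Forcing::NotForcing => era_length >= sessions_per_era,
        Forcing::ForceNone => false,
    }
}

fn main() {
    assert!(!should_plan_new_era(Forcing::NotForcing, 3, 6));
    assert!(should_plan_new_era(Forcing::NotForcing, 6, 6));
    assert!(should_plan_new_era(Forcing::ForceAlways, 0, 6));
}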
+ fn end_session(session_index: SessionIndex) { + if let Some(active_era) = Self::active_era() { + let next_active_era_start_session_index = + Self::eras_start_session_index(active_era.index + 1) + .unwrap_or_else(|| { + frame_support::print( + "Error: start_session_index must be set for active_era + 1" + ); + 0 + }); - for (v, p) in validators.iter().zip(points.individual.into_iter()) { - if p != 0 { - let reward = Perbill::from_rational_approximation(p, points.total) * total_payout; - total_imbalance.subsume(Self::reward_validator(v, reward)); - } + if next_active_era_start_session_index == session_index + 1 { + Self::end_era(active_era, session_index); } - - // assert!(total_imbalance.peek() == total_payout) - let total_payout = total_imbalance.peek(); - - let rest = max_payout.saturating_sub(total_payout); - Self::deposit_event(RawEvent::Reward(total_payout, rest)); - - T::Reward::on_unbalanced(total_imbalance); - T::RewardRemainder::on_unbalanced(T::Currency::issue(rest)); } + } - // Increment current era. - let current_era = CurrentEra::mutate(|s| { *s += 1; *s }); - - CurrentEraStartSessionIndex::mutate(|v| { - *v = start_session_index; + /// * Increment `active_era.index`, + /// * reset `active_era.start`, + /// * update `BondedEras` and apply slashes. + fn start_era(start_session: SessionIndex) { + let active_era = >::mutate(|active_era| { + let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0); + *active_era = Some(ActiveEraInfo { + index: new_index, + // Set new active era start in next `on_finalize`. To guarantee usage of `Time` + start: None, + }); + new_index }); + let bonding_duration = T::BondingDuration::get(); BondedEras::mutate(|bonded| { - bonded.push((current_era, start_session_index)); + bonded.push((active_era, start_session)); - if current_era > bonding_duration { - let first_kept = current_era - bonding_duration; + if active_era > bonding_duration { + let first_kept = active_era - bonding_duration; // prune out everything that's from before the first-kept index. let n_to_prune = bonded.iter() @@ -1444,18 +1712,65 @@ impl Module { } }); - // Reassign all Stakers. - let (_slot_stake, maybe_new_validators) = Self::select_validators(); - Self::apply_unapplied_slashes(current_era); + Self::apply_unapplied_slashes(active_era); + } + + /// Compute payout for era. + fn end_era(active_era: ActiveEraInfo>, _session_index: SessionIndex) { + // Note: active_era_start can be None if end era is called during genesis config. + if let Some(active_era_start) = active_era.start { + let now = T::Time::now(); + + let era_duration = now - active_era_start; + let (total_payout, _max_payout) = inflation::compute_total_payout( + &T::RewardCurve::get(), + Self::eras_total_stake(&active_era.index), + T::Currency::total_issuance(), + // Duration of era; more than u64::MAX is rewarded as u64::MAX. + era_duration.saturated_into::(), + ); + + // Set ending era reward. + >::insert(&active_era.index, total_payout); + } + } + + /// Plan a new era. Return the potential new staking set. + fn new_era(start_session_index: SessionIndex) -> Option> { + // Increment or set current era. + let current_era = CurrentEra::mutate(|s| { + *s = Some(s.map(|s| s + 1).unwrap_or(0)); + s.unwrap() + }); + ErasStartSessionIndex::insert(¤t_era, &start_session_index); + + // Clean old era information. + if let Some(old_era) = current_era.checked_sub(Self::history_depth() + 1) { + Self::clear_era_information(old_era); + } + + // Set staking information for new era. 
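// [Editor's sketch, not part of the diff] `start_era` keeps `BondedEras` bounded: it pushes the
// new (era, first session) pair and prunes everything older than `BondingDuration` eras, which is
// the window in which an offence can still be slashed. A std-only model of that push-and-prune:
type EraIndex = u32;
type SessionIndex = u32;

fn push_and_prune(
    bonded: &mut Vec<(EraIndex, SessionIndex)>,
    era: EraIndex,
    session: SessionIndex,
    bonding_duration: EraIndex,
) {
    bonded.push((era, session));
    if era > bonding_duration {
        let first_kept = era - bonding_duration;
        // Drop everything that is from before the first kept era.
        bonded.retain(|&(e, _)| e >= first_kept);
    }
}

fn main() {
    let mut bonded = Vec::new();
    for era in 0..10 {
        push_and_prune(&mut bonded, era, era * 6, 3);
    }
    assert_eq!(bonded.first().map(|&(e, _)| e), Some(6)); // eras 6..=9 are still bonded
    assert_eq!(bonded.len(), 4);
}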
+ let maybe_new_validators = Self::select_validators(current_era); maybe_new_validators } + /// Clear all era information for given era. + fn clear_era_information(era_index: EraIndex) { + >::remove_prefix(era_index); + >::remove_prefix(era_index); + >::remove_prefix(era_index); + >::remove(era_index); + >::remove(era_index); + >::remove(era_index); + ErasStartSessionIndex::remove(era_index); + } + /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. - fn apply_unapplied_slashes(current_era: EraIndex) { + fn apply_unapplied_slashes(active_era: EraIndex) { let slash_defer_duration = T::SlashDeferDuration::get(); ::EarliestUnappliedSlash::mutate(|earliest| if let Some(ref mut earliest) = earliest { - let keep_from = current_era.saturating_sub(slash_defer_duration); + let keep_from = active_era.saturating_sub(slash_defer_duration); for era in (*earliest)..keep_from { let era_slashes = ::UnappliedSlashes::take(&era); for slash in era_slashes { @@ -1467,19 +1782,25 @@ impl Module { }) } - /// Select a new validator set from the assembled stakers and their role preferences. + /// Select a new validator set from the assembled stakers and their role preferences, and store + /// staking information for the new current era. + /// + /// Fill the storages `ErasStakers`, `ErasStakersClipped`, `ErasValidatorPrefs` and + /// `ErasTotalStake` for current era. /// - /// Returns the new `SlotStake` value and a set of newly selected _stash_ IDs. + /// Returns a set of newly selected _stash_ IDs. /// /// Assumes storage is coherent with the declaration. - fn select_validators() -> (BalanceOf, Option>) { + fn select_validators(current_era: EraIndex) -> Option> { let mut all_nominators: Vec<(T::AccountId, Vec)> = Vec::new(); - let all_validator_candidates_iter = >::enumerate(); - let all_validators = all_validator_candidates_iter.map(|(who, _pref)| { - let self_vote = (who.clone(), vec![who.clone()]); + let mut all_validators_and_prefs = BTreeMap::new(); + let mut all_validators = Vec::new(); + for (validator, preference) in >::enumerate() { + let self_vote = (validator.clone(), vec![validator.clone()]); all_nominators.push(self_vote); - who - }).collect::>(); + all_validators_and_prefs.insert(validator.clone(), preference); + all_validators.push(validator); + } let nominator_votes = >::enumerate().map(|(nominator, nominations)| { let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; @@ -1506,8 +1827,8 @@ impl Module { ); if let Some(phragmen_result) = maybe_phragmen_result { - let elected_stashes = phragmen_result.winners.iter() - .map(|(s, _)| s.clone()) + let elected_stashes = phragmen_result.winners.into_iter() + .map(|(s, _)| s) .collect::>(); let assignments = phragmen_result.assignments; @@ -1520,13 +1841,8 @@ impl Module { Self::slashable_balance_of, ); - // Clear Stakers. - for v in Self::current_elected().iter() { - >::remove(v); - } - - // Populate Stakers and figure out the minimum stake behind a slot. - let mut slot_stake = BalanceOf::::max_value(); + // Populate stakers information and figure out the total stake. + let mut total_staked = BalanceOf::::zero(); for (c, s) in supports.into_iter() { // build `struct exposure` from `support` let mut others = Vec::new(); @@ -1543,6 +1859,9 @@ impl Module { } total = total.saturating_add(value); }); + + total_staked = total_staked.saturating_add(total); + let exposure = Exposure { own, others, @@ -1552,24 +1871,31 @@ impl Module { // we simulate it in some tests. 
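// [Editor's sketch, not part of the diff] With `HistoryDepth` eras kept, planning era N prunes
// era N - HistoryDepth - 1 via `clear_era_information`, so exactly [N - HistoryDepth, N] stays
// queryable. A tiny std-only check of that arithmetic:
fn era_to_prune(current_era: u32, history_depth: u32) -> Option<u32> {
    current_era.checked_sub(history_depth + 1)
}

fn main() {
    assert_eq!(era_to_prune(10, 84), None); // nothing to prune while the chain is young
    assert_eq!(era_to_prune(100, 84), Some(15)); // eras 16..=100 remain available
}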
total, }; + >::insert(¤t_era, &c, &exposure); - if exposure.total < slot_stake { - slot_stake = exposure.total; + let mut exposure_clipped = exposure; + let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; + if exposure_clipped.others.len() > clipped_max_len { + exposure_clipped.others.sort_unstable_by(|a, b| a.value.cmp(&b.value).reverse()); + exposure_clipped.others.truncate(clipped_max_len); } - >::insert(&c, exposure.clone()); + >::insert(¤t_era, &c, exposure_clipped); } - // Update slot stake. - >::put(&slot_stake); - - // Set the new validator set in sessions. - >::put(&elected_stashes); + // Insert current era staking informations + >::insert(¤t_era, total_staked); + let default_pref = ValidatorPrefs::default(); + for stash in &elected_stashes { + let pref = all_validators_and_prefs.get(stash) + .unwrap_or(&default_pref); // Must never happen, but better to be safe. + >::insert(¤t_era, stash, pref); + } // In order to keep the property required by `n_session_ending` // that we must return the new validator set even if it's the same as the old, // as long as any underlying economic conditions have changed, we don't attempt // to do any optimization where we compare against the prior set. - (slot_stake, Some(elected_stashes)) + Some(elected_stashes) } else { // There were not enough candidates for even our minimal level of functionality. // This is bad. @@ -1577,7 +1903,7 @@ impl Module { // and let the chain keep producing blocks until we can decide on a sufficiently // substantial set. // TODO: #2494 - (Self::slot_stake(), None) + None } } @@ -1585,18 +1911,22 @@ impl Module { /// /// Assumes storage is upgraded before calling. /// - /// This is called : - /// - Immediately when an account's balance falls below existential deposit. + /// This is called: /// - after a `withdraw_unbond()` call that frees all of a stash's bonded balance. - fn kill_stash(stash: &T::AccountId) { - if let Some(controller) = >::take(stash) { - >::remove(&controller); - } + /// - through `reap_stash()` if the balance has fallen to zero (through slashing). + fn kill_stash(stash: &T::AccountId) -> DispatchResult { + let controller = Bonded::::take(stash).ok_or(Error::::NotStash)?; + >::remove(&controller); + >::remove(stash); >::remove(stash); >::remove(stash); slashing::clear_stash_metadata::(stash); + + system::Module::::dec_ref(stash); + + Ok(()) } /// Add reward points to validators using their stash account ID. @@ -1611,33 +1941,17 @@ impl Module { /// /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. /// If you need to reward lots of validator consider using `reward_by_indices`. - pub fn reward_by_ids(validators_points: impl IntoIterator) { - CurrentEraPointsEarned::mutate(|rewards| { - let current_elected = >::current_elected(); - for (validator, points) in validators_points.into_iter() { - if let Some(index) = current_elected.iter() - .position(|elected| *elected == validator) - { - rewards.add_points_to_index(index as u32, points); - } - } - }); - } - - /// Add reward points to validators using their validator index. - /// - /// For each element in the iterator the given number of points in u32 is added to the - /// validator, thus duplicates are handled. 
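// [Editor's sketch, not part of the diff] The clipping above keeps only the
// `MaxNominatorRewardedPerValidator` largest backers of a validator so the payout I/O stays
// bounded. A std-only version of that sort-and-truncate, with nominators as (account, value):
fn clip_exposure(mut others: Vec<(u64, u128)>, max_rewarded: usize) -> Vec<(u64, u128)> {
    if others.len() > max_rewarded {
        // Largest stake first, then drop the tail.
        others.sort_unstable_by(|a, b| b.1.cmp(&a.1));
        others.truncate(max_rewarded);
    }
    others
}

fn main() {
    let clipped = clip_exposure(vec![(1, 50), (2, 500), (3, 5), (4, 100)], 2);
    assert_eq!(clipped, vec![(2, 500), (4, 100)]);
}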
- pub fn reward_by_indices(validators_points: impl IntoIterator) { - let current_elected_len = >::current_elected().len() as u32; - - CurrentEraPointsEarned::mutate(|rewards| { - for (validator_index, points) in validators_points.into_iter() { - if validator_index < current_elected_len { - rewards.add_points_to_index(validator_index, points); + pub fn reward_by_ids( + validators_points: impl IntoIterator + ) { + if let Some(active_era) = Self::active_era() { + >::mutate(active_era.index, |era_rewards| { + for (validator, points) in validators_points.into_iter() { + *era_rewards.individual.entry(validator).or_default() += points; + era_rewards.total += points; } - } - }); + }); + } } /// Ensures that at the end of the current session there will be a new era. @@ -1649,47 +1963,58 @@ impl Module { } } +/// In this implementation `new_session(session)` must be called before `end_session(session-1)` +/// i.e. the new session must be planned before the ending of the previous session. +/// +/// Once the first new_session is planned, all session must start and then end in order, though +/// some session can lag in between the newest session planned and the latest session started. impl pallet_session::SessionManager for Module { fn new_session(new_index: SessionIndex) -> Option> { - Self::ensure_storage_upgraded(); - if new_index == 0 { - return Self::initial_session(); - } - Self::new_session(new_index - 1) + Self::new_session(new_index) + } + fn start_session(start_index: SessionIndex) { + Self::start_session(start_index) + } + fn end_session(end_index: SessionIndex) { + Self::end_session(end_index) } - fn end_session(_end_index: SessionIndex) {} } +/// This implementation has the same constrains as the implementation of +/// `pallet_session::SessionManager`. impl SessionManager>> for Module { fn new_session(new_index: SessionIndex) -> Option>)>> { >::new_session(new_index).map(|validators| { + let current_era = Self::current_era() + // Must be some as a new era has been created. + .unwrap_or(0); + validators.into_iter().map(|v| { - let exposure = >::get(&v); + let exposure = Self::eras_stakers(current_era, &v); (v, exposure) }).collect() }) } + fn start_session(start_index: SessionIndex) { + >::start_session(start_index) + } fn end_session(end_index: SessionIndex) { >::end_session(end_index) } } -impl OnReapAccount for Module { - fn on_reap_account(stash: &T::AccountId) { - Self::ensure_storage_upgraded(); - Self::kill_stash(stash); - } -} - /// Add reward points to block authors: /// * 20 points to the block producer for producing a (non-uncle) block in the relay chain, /// * 2 points to the block producer for each reference to a previously unreferenced uncle, and /// * 1 point to the producer of each referenced uncle block. -impl pallet_authorship::EventHandler for Module { +impl pallet_authorship::EventHandler for Module + where + T: Trait + pallet_authorship::Trait + pallet_session::Trait +{ fn note_author(author: T::AccountId) { - Self::reward_by_ids(vec![(author, 20)]); + Self::reward_by_ids(vec![(author, 20)]) } fn note_uncle(author: T::AccountId, _age: T::BlockNumber) { Self::reward_by_ids(vec![ @@ -1709,15 +2034,22 @@ impl Convert> for StashOf { } } -/// A typed conversion from stash account ID to the current exposure of nominators +/// A typed conversion from stash account ID to the active exposure of nominators /// on that account. +/// +/// Active exposure is the exposure of the validator set currently validating, i.e. in +/// `active_era`. 
It can differ from the latest planned exposure in `current_era`. pub struct ExposureOf(sp_std::marker::PhantomData); impl Convert>>> for ExposureOf { fn convert(validator: T::AccountId) -> Option>> { - Some(>::stakers(&validator)) + if let Some(active_era) = >::active_era() { + Some(>::eras_stakers(active_era.index, &validator)) + } else { + None + } } } @@ -1737,17 +2069,27 @@ impl OnOffenceHandler>::ensure_storage_upgraded(); - let reward_proportion = SlashRewardFraction::get(); - let era_now = Self::current_era(); - let window_start = era_now.saturating_sub(T::BondingDuration::get()); - let current_era_start_session = CurrentEraStartSessionIndex::get(); + let active_era = { + let active_era = Self::active_era(); + if active_era.is_none() { + return + } + active_era.unwrap().index + }; + let active_era_start_session_index = Self::eras_start_session_index(active_era) + .unwrap_or_else(|| { + frame_support::print("Error: start_session_index must be set for current_era"); + 0 + }); + + let window_start = active_era.saturating_sub(T::BondingDuration::get()); - // fast path for current-era report - most likely. - let slash_era = if slash_session >= current_era_start_session { - era_now + // fast path for active-era report - most likely. + // `slash_session` cannot be in a future active era. It must be in `active_era` or before. + let slash_era = if slash_session >= active_era_start_session_index { + active_era } else { let eras = BondedEras::get(); @@ -1760,7 +2102,7 @@ impl OnOffenceHandler::EarliestUnappliedSlash::mutate(|earliest| { if earliest.is_none() { - *earliest = Some(era_now) + *earliest = Some(active_era) } }); @@ -1781,7 +2123,7 @@ impl OnOffenceHandler OnOffenceHandler::UnappliedSlashes::mutate( - era_now, + active_era, move |for_later| for_later.push(unapplied), ); } @@ -1813,9 +2155,7 @@ impl ReportOffence R: ReportOffence, O: Offence, { - fn report_offence(reporters: Vec, offence: O) { - >::ensure_storage_upgraded(); - + fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { // disallow any slashing from before the current bonding period. let offence_session = offence.session_index(); let bonded_eras = BondedEras::get(); @@ -1825,7 +2165,8 @@ impl ReportOffence } else { >::deposit_event( RawEvent::OldSlashingReportDiscarded(offence_session) - ) + ); + Ok(()) } } } diff --git a/frame/staking/src/migration/deprecated.rs b/frame/staking/src/migration/deprecated.rs new file mode 100644 index 0000000000000000000000000000000000000000..41cf6652291e1835355a170598f16ec3c5dfe527 --- /dev/null +++ b/frame/staking/src/migration/deprecated.rs @@ -0,0 +1,73 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +/// Deprecated storages used for migration from v1.0.0 to v2.0.0 only. 
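// [Editor's sketch, not part of the diff] Mapping an offence's session back to the era in which
// it happened: the fast path is the active era, otherwise the (era, start_session) pairs kept in
// `BondedEras` are consulted and anything older than the bonding window is discarded. A std-only
// approximation of that lookup (the real search details are not fully shown in this hunk):
fn slash_era_for(
    slash_session: u32,
    active_era: u32,
    active_era_start_session: u32,
    bonded_eras: &[(u32, u32)], // (era, first session of that era), ascending
) -> Option<u32> {
    if slash_session >= active_era_start_session {
        return Some(active_era);
    }
    // Pick the newest still-bonded era that started at or before the offending session.
    bonded_eras.iter().rev().find(|&&(_, start)| start <= slash_session).map(|&(era, _)| era)
}

fn main() {
    let bonded = [(7, 42), (8, 48), (9, 54)];
    assert_eq!(slash_era_for(60, 10, 60, &bonded), Some(10)); // offence in the active era
    assert_eq!(slash_era_for(50, 10, 60, &bonded), Some(8)); // older offence, still bonded
    assert_eq!(slash_era_for(10, 10, 60, &bonded), None); // before the bonding window
}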
+ +use crate::{Trait, BalanceOf, MomentOf, SessionIndex, Exposure, UnlockChunk}; +use codec::{Encode, Decode, HasCompact}; +use frame_support::{decl_module, decl_storage}; +use sp_std::prelude::*; + +/// Reward points of an era. Used to split era total payout between validators. +#[derive(Encode, Decode, Default)] +pub struct EraPoints { + /// Total number of points. Equals the sum of reward points for each validator. + pub total: u32, + /// The reward points earned by a given validator. The index of this vec corresponds to the + /// index into the current validator set. + pub individual: Vec, +} + +#[derive(Encode, Decode)] +pub struct OldStakingLedger { + pub stash: AccountId, + #[codec(compact)] + pub total: Balance, + #[codec(compact)] + pub active: Balance, + pub unlocking: Vec>, +} + +decl_module! { + pub struct Module for enum Call where origin: T::Origin { } +} + +decl_storage! { + pub trait Store for Module as Staking { + pub SlotStake: BalanceOf; + + /// The currently elected validator set keyed by stash account ID. + pub CurrentElected: Vec; + + /// The start of the current era. + pub CurrentEraStart: MomentOf; + + /// The session index at which the current era started. + pub CurrentEraStartSessionIndex: SessionIndex; + + /// Rewards for the current era. Using indices of current elected set. + pub CurrentEraPointsEarned: EraPoints; + + /// Nominators for a particular account that is in action right now. You can't iterate + /// through validators here, but you can find them in the Session module. + /// + /// This is keyed by the stash account. + pub Stakers: map hasher(blake2_256) T::AccountId => Exposure>; + + /// Old upgrade flag. + pub IsUpgraded: bool; + } +} diff --git a/frame/staking/src/migration/mod.rs b/frame/staking/src/migration/mod.rs new file mode 100644 index 0000000000000000000000000000000000000000..971e409189188654b79df2029c5857049e0b8f25 --- /dev/null +++ b/frame/staking/src/migration/mod.rs @@ -0,0 +1,119 @@ +// Copyright 2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Update storage from v1.0.0 to v2.0.0 +//! +//! In old version the staking module has several issue about handling session delay, the +//! current era was always considered the active one. +//! +//! After the migration the current era will still be considered the active one for the era of +//! the upgrade. And the delay issue will be fixed when planning the next era. 
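// [Editor's sketch, not part of the diff] The heart of the v1 -> v2 migration below is re-shaping
// stored values: every old ledger gains a `last_reward: None` field and the per-era storages are
// seeded from the deprecated single-era items. A std-only model of the ledger translation
// (account and balance types simplified to u64/u128):
struct OldLedger { stash: u64, total: u128, active: u128 }
struct NewLedger { stash: u64, total: u128, active: u128, last_reward: Option<u32> }

fn translate(old: OldLedger) -> NewLedger {
    NewLedger {
        stash: old.stash,
        total: old.total,
        active: old.active,
        // Nothing has been claimed through the new payout mechanism yet.
        last_reward: None,
    }
}

fn main() {
    let new = translate(OldLedger { stash: 11, total: 1_000, active: 900 });
    assert!(new.last_reward.is_none());
    assert_eq!(new.total, 1_000);
}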
+// * create: +// * ActiveEraStart +// * ErasRewardPoints +// * ActiveEra +// * ErasStakers +// * ErasStakersClipped +// * ErasValidatorPrefs +// * ErasTotalStake +// * ErasStartSessionIndex +// * translate StakingLedger +// * removal of: +// * Stakers +// * SlotStake +// * CurrentElected +// * CurrentEraStart +// * CurrentEraStartSessionIndex +// * CurrentEraPointsEarned + +use super::*; +mod deprecated; +#[cfg(test)] +mod tests; +#[cfg(test)] +mod test_upgrade_from_master_dataset; + +pub fn on_runtime_upgrade() { + match StorageVersion::get() { + Releases::V2_0_0 => return, + Releases::V1_0_0 => upgrade_v1_to_v2::(), + } +} + +fn upgrade_v1_to_v2() { + deprecated::IsUpgraded::kill(); + + let current_era_start_index = deprecated::CurrentEraStartSessionIndex::get(); + let current_era = as Store>::CurrentEra::get().unwrap_or(0); + let current_era_start = deprecated::CurrentEraStart::::get(); + as Store>::ErasStartSessionIndex::insert(current_era, current_era_start_index); + as Store>::ActiveEra::put(ActiveEraInfo { + index: current_era, + start: Some(current_era_start), + }); + + let current_elected = deprecated::CurrentElected::::get(); + let mut current_total_stake = >::zero(); + for validator in ¤t_elected { + let exposure = deprecated::Stakers::::get(validator); + current_total_stake += exposure.total; + as Store>::ErasStakers::insert(current_era, validator, &exposure); + + let mut exposure_clipped = exposure; + let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; + if exposure_clipped.others.len() > clipped_max_len { + exposure_clipped.others.sort_unstable_by(|a, b| a.value.cmp(&b.value).reverse()); + exposure_clipped.others.truncate(clipped_max_len); + } + as Store>::ErasStakersClipped::insert(current_era, validator, exposure_clipped); + + let pref = as Store>::Validators::get(validator); + as Store>::ErasValidatorPrefs::insert(current_era, validator, pref); + } + as Store>::ErasTotalStake::insert(current_era, current_total_stake); + + let points = deprecated::CurrentEraPointsEarned::get(); + as Store>::ErasRewardPoints::insert(current_era, EraRewardPoints { + total: points.total, + individual: current_elected.iter().cloned().zip(points.individual.iter().cloned()).collect(), + }); + + let res = as Store>::Ledger::translate_values( + |old: deprecated::OldStakingLedger>| StakingLedger { + stash: old.stash, + total: old.total, + active: old.active, + unlocking: old.unlocking, + last_reward: None, + } + ); + if let Err(e) = res { + frame_support::print("Encountered error in migration of Staking::Ledger map."); + frame_support::print("The number of removed key/value is:"); + frame_support::print(e); + } + + + // Kill old storages + deprecated::Stakers::::remove_all(); + deprecated::SlotStake::::kill(); + deprecated::CurrentElected::::kill(); + deprecated::CurrentEraStart::::kill(); + deprecated::CurrentEraStartSessionIndex::kill(); + deprecated::CurrentEraPointsEarned::kill(); + + StorageVersion::put(Releases::V2_0_0); +} diff --git a/frame/staking/src/migration/test_upgrade_from_master_dataset.rs b/frame/staking/src/migration/test_upgrade_from_master_dataset.rs new file mode 100644 index 0000000000000000000000000000000000000000..32f9b0a3edb968210f8a49957c26efe2ff4676a7 --- /dev/null +++ b/frame/staking/src/migration/test_upgrade_from_master_dataset.rs @@ -0,0 +1,59 @@ +// Copyright 2020-2020 Parity Technologies (UK) Ltd. +// This file is part of Substrate. 
+ +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . + +//! Data set for testing update from previous staking module. +//! +//! Each data set correspond to the storage state for its corresponding block just before the +//! staking initialisation. +//! +//! it has been generated using the following code at commit +//! dc92587bea4032e0a0fc96785bfd9aa17c95459e +//! +//! ```nocompile +//! fn print_storage(i: u32) { +//! let mut storage = vec![]; +//! let mut current_key = vec![]; +//! while let Some(key) = sp_io::storage::next_key(¤t_key) { +//! storage.push((key.clone(), sp_io::storage::get(&key).unwrap())); +//! current_key = key; +//! } +//! println!("const _{}: &[(&[u8], &[u8])] = {:?};", i, storage); +//! } +//! +//! #[test] +//! fn get_states() { +//! let mut ext = ExtBuilder::default().build(); +//! +//! for index in 1..10u32 { +//! ext.execute_with(|| { +//! print_storage(index - 1); +//! System::set_block_number((index).into()); +//! Timestamp::set_timestamp(System::block_number() * 1000); +//! Session::on_initialize(System::block_number()); +//! }); +//! } +//! } +//! ``` + +pub const _0: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[15, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[60, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 
14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 
234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 
39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 
21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[4, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 
58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 2, 0, 0, 0])]; +pub const _1: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[16, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 
212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[64, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 
156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 
247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 
115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[4, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 
201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[1, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 
73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 3, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[232, 3, 0, 0, 0, 0, 0, 0])]; +pub const _2: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[2, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[17, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[68, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 
12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 
84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 
21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 
147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 
22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[4, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 
204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 
196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 4, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[208, 7, 0, 0, 0, 0, 0, 0])]; +pub const _3: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[3, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[19, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[76, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 
230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[1, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 
103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 
137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 
21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[184, 11, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 
194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[3, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 
191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 5, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[184, 11, 0, 0, 0, 0, 0, 0])]; +pub const _4: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[4, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[20, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 
74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[1, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 
208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], 
&[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 
48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[184, 11, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 
101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[4, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 
7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 6, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[160, 15, 0, 0, 0, 0, 0, 0])]; +pub const _5: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[5, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[21, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 
247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 
59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[1, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 
237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 
131, 248, 88, 240, 186, 191, 211, 11], &[3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 
182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[184, 11, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[8, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 
118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[5, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 
189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 244, 170, 194, 251, 227, 63, 3, 85, 75, 254, 181, 89, 234, 38, 144, 237, 133, 33, 202, 164, 190, 150, 30, 97, 201, 26, 201, 161, 83, 13, 206, 122], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 7, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[136, 19, 0, 0, 0, 0, 0, 0])]; +pub const _6: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[6, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[23, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[92, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 
124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 
116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 
197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 
72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[112, 23, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[12, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 
116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[6, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 
201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 91, 143, 41, 219, 118, 207, 78, 103, 110, 79, 201, 177, 112, 64, 49, 45, 235, 237, 175, 205, 86, 55, 251, 60, 123, 173, 210, 205, 220, 230, 164, 69], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 244, 170, 194, 251, 227, 63, 3, 85, 75, 254, 181, 89, 234, 38, 144, 237, 133, 33, 202, 164, 190, 150, 30, 97, 
201, 26, 201, 161, 83, 13, 206, 122], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 8, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[112, 23, 0, 0, 0, 0, 0, 0])]; +pub const _7: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[7, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[24, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 
239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 
38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 
202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[112, 23, 0, 0, 
0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[12, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 
101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[7, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 91, 143, 41, 219, 118, 207, 78, 103, 110, 79, 201, 177, 112, 64, 49, 45, 235, 237, 175, 205, 86, 55, 251, 60, 123, 173, 210, 205, 220, 230, 164, 69], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 
96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 157, 255, 135, 106, 75, 148, 45, 10, 151, 17, 209, 130, 33, 137, 143, 17, 202, 57, 117, 21, 137, 235, 244, 212, 157, 116, 159, 107, 62, 73, 50, 146], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 244, 170, 194, 251, 227, 63, 3, 85, 75, 254, 181, 89, 234, 38, 144, 237, 133, 33, 202, 164, 190, 150, 30, 97, 201, 26, 201, 161, 83, 13, 206, 122], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 9, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[88, 27, 0, 0, 0, 0, 0, 0])]; +pub const _8: &[(&[u8], &[u8])] = &[(&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 2, 165, 193, 177, 154, 183, 160, 79, 83, 108, 81, 154, 202, 73, 131, 172], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 10, 152, 253, 190, 156, 230, 197, 88, 55, 87, 108, 96, 199, 175, 56, 80], &[25, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 128, 212, 30, 94, 22, 5, 103, 101, 188, 132, 97, 133, 16, 114, 201, 215], &[100, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 138, 66, 243, 51, 35, 203, 92, 237, 59, 68, 221, 130, 95, 218, 159, 204], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 164, 71, 4, 181, 104, 210, 22, 103, 53, 106, 90, 5, 12, 17, 135, 70, 129, 228, 122, 25, 230, 178, 155, 10, 101, 185, 89, 23, 98, 206, 81, 67, 237, 48, 208, 38, 30, 93, 36, 163, 32, 23, 82, 80, 107, 32, 241, 92], &[69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69, 69]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 29, 189, 125, 11, 86, 26, 65, 210, 60, 42, 70, 154, 212, 47, 189, 112, 213, 67, 139, 174, 130, 111, 111, 214, 7, 65, 49, 144, 195, 124, 54, 59], &[0, 0, 0, 0, 0, 0, 0, 0, 10, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 100, 74, 81, 252, 33, 158, 145, 69, 184, 87, 123, 183, 118, 40, 118, 123, 28, 47, 115, 116, 142, 218, 139, 120, 139, 252, 102, 161, 181, 42, 32, 248], &[0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 165, 212, 232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 108, 221, 179, 103, 175, 189, 88, 59, 180, 143, 155, 189, 125, 91, 163, 177, 208, 115, 139, 72, 129, 177, 205, 221, 56, 22, 149, 38, 216, 21, 129, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 44, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 
12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0, 232, 3, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 232, 139, 67, 253, 237, 99, 35, 239, 2, 255, 239, 251, 216, 196, 8, 70, 238, 9, 191, 49, 98, 113, 189, 34, 54, 150, 89, 201, 89, 221, 115, 58], &[0, 0, 0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 233, 103, 96, 210, 116, 101, 58, 57, 180, 41, 168, 126, 186, 174, 157, 58, 164, 253, 245, 139, 144, 150, 207, 11, 235, 199, 196, 229, 164, 194, 237, 141], &[0, 0, 0, 0, 0, 0, 0, 0, 144, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[38, 170, 57, 78, 234, 86, 48, 224, 124, 72, 174, 12, 149, 88, 206, 247, 185, 157, 136, 14, 198, 129, 121, 156, 12, 243, 14, 136, 134, 55, 29, 169, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 
140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0, 0, 0, 0, 0, 0, 0, 0, 208, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0, 244, 1, 0, 0, 0, 0, 0, 0]), (&[58, 99, 111, 100, 101], &[]), (&[58, 101, 120, 116, 114, 105, 110, 115, 105, 99, 95, 105, 110, 100, 101, 120], &[0, 0, 0, 0]), (&[58, 104, 101, 97, 112, 112, 97, 103, 101, 115], &[8, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 11, 106, 69, 50, 30, 250, 233, 42, 234, 21, 224, 116, 14, 199, 175, 231], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 14, 160, 236, 172, 118, 69, 125, 15, 155, 57, 185, 129, 221, 16, 112, 18], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 19, 142, 113, 97, 36, 145, 25, 45, 104, 222, 171, 126, 111, 86, 63, 225], &[2, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 40, 220, 203, 85, 155, 149, 196, 1, 104, 161, 178, 105, 101, 129, 181, 167], &[0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[40, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[10, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[30, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[20, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 62, 209, 75, 69, 237, 32, 208, 84, 240, 94, 55, 226, 84, 44, 254, 112, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[100, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 94, 90, 15, 75, 176, 234, 179, 121, 134, 37, 171, 24, 100, 84, 93, 19, 212, 232, 208, 104, 153, 217, 219, 77, 97, 219, 157, 238, 147, 186, 2, 221], &[21, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 165, 186, 85, 195, 163, 151, 212, 60, 23, 89, 63, 40, 154, 164, 151, 157, 10, 229, 146, 68, 179, 47, 3, 236, 19, 65, 97, 129, 175, 134, 209, 11], &[31, 0, 0, 0, 0, 0, 0, 0, 4, 4, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 
187, 140, 196, 189, 225, 131, 90, 186, 237, 15, 242, 121, 87, 0, 55, 252, 73, 213, 189, 23, 110, 180, 130, 163, 178, 129, 109, 244, 252, 194, 204, 27, 200, 106, 202], &[11, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 213, 92, 123, 109, 126, 139, 228, 10, 88, 51, 198, 4, 8, 188, 78, 184, 171, 234, 78, 93, 168, 211, 37, 233, 8, 135, 156, 195, 59, 151, 205, 153], &[101, 0, 0, 0, 0, 0, 0, 0, 209, 7, 209, 7, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 66, 42, 219, 87, 159, 29, 191, 79, 56, 134, 197, 207, 163, 187, 140, 196, 216, 247, 229, 16, 146, 46, 205, 71, 180, 10, 28, 88, 41, 56, 34, 6, 69, 103, 185, 191, 60, 126, 225, 114, 12, 144, 76, 94, 12, 87, 247, 55], &[41, 0, 0, 0, 0, 0, 0, 0, 161, 15, 161, 15, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 80, 154, 155, 110, 250, 147, 245, 187, 131, 248, 88, 240, 186, 191, 211, 11], &[6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 85, 121, 41, 127, 77, 251, 150, 9, 231, 228, 194, 235, 171, 156, 228, 10], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 86, 239, 98, 39, 206, 203, 47, 7, 39, 76, 176, 87, 45, 143, 164, 194], &[101, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 106, 147, 17, 38, 51, 187, 51, 84, 230, 121, 82, 252, 221, 116, 12, 213], &[31, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[149, 17, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 245, 1]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 131, 229, 133, 187, 197, 253, 206, 197, 114, 25, 192, 220, 129, 239, 95, 244, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[125, 21, 161, 15, 4, 101, 0, 0, 0, 0, 0, 0, 0, 221, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0, 1, 21, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0, 0, 1, 21, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0, 1, 31, 0, 0, 0, 0, 0, 0, 0, 1, 11, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 
43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 146, 32, 225, 114, 190, 211, 22, 96, 95, 115, 241, 255, 123, 74, 222, 152, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 106, 99, 127, 98, 174, 42, 241, 199, 227, 30, 237, 126, 150, 190, 4, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[8, 11, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 156, 189, 47, 11, 41, 160, 8, 163, 96, 9, 172, 68, 204, 160, 201, 105], &[101, 4, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 166, 97, 104, 247, 231, 37, 155, 102, 112, 160, 111, 37, 101, 227, 229, 242], &[112, 23, 0, 0, 0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 180, 154, 39, 56, 238, 179, 8, 150, 170, 203, 139, 63, 180, 100, 113, 189], &[0, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 194, 154, 3, 16, 225, 187, 69, 210, 12, 172, 231, 124, 203, 98, 201, 125], &[0, 225, 245, 5]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 234, 7, 222, 43, 143, 1, 5, 22, 220, 163, 247, 239, 82, 247, 172, 90], &[12, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 2, 0, 0, 0, 6, 0, 0, 0]), (&[95, 62, 73, 7, 247, 22, 172, 137, 182, 52, 125, 21, 236, 236, 237, 202, 247, 218, 208, 49, 115, 36, 174, 202, 232, 116, 75, 135, 252, 149, 242, 243], &[0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 77, 254, 57, 245, 78, 123, 168, 74, 190, 191, 139, 138, 100, 234, 72, 46, 45, 174, 43, 141, 96, 79, 230, 7, 252, 242, 172, 39, 100, 43, 161, 177], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 
33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 137, 6, 59, 94, 171, 160, 104, 80, 196, 182, 201, 87, 28, 70, 86, 200, 124, 206, 182, 4, 38, 77, 37, 233, 32, 121, 227, 154, 137, 129, 91, 166], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 1, 0, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 232, 3, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 33, 143, 38, 199, 58, 221, 99, 72, 151, 85, 11, 64, 3, 178, 107, 198, 250, 67, 84, 127, 177, 206, 187, 248, 7, 171, 139, 143, 54, 116, 13, 218, 140, 198, 7, 172, 12, 47, 130, 190, 178, 198, 48, 211, 195, 137, 122, 61], &[4, 115, 116, 97, 107, 105, 110, 103, 32, 244, 1, 0, 0, 0, 0, 0, 0, 2]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 87, 200, 117, 228, 207, 247, 65, 72, 228, 98, 143, 38, 75, 151, 76, 128], &[214, 61, 165, 212, 232, 0, 0, 0]), (&[194, 38, 18, 118, 204, 157, 31, 133, 152, 234, 75, 106, 116, 177, 92, 47, 227, 253, 141, 247, 41, 112, 188, 130, 229, 38, 126, 160, 30, 221, 217, 73], &[1]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 83, 50, 194, 39, 103, 192, 178, 217, 125, 32, 22, 246, 168, 239, 120, 222, 173, 2, 155, 120, 7, 166, 176, 226, 172, 7, 43, 102, 21, 204, 92, 8], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 76, 1, 78, 107, 248, 184, 194, 192, 17, 231, 41, 11, 133, 105, 107, 179, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 170, 10, 228, 226, 42, 96, 247, 17, 144, 247, 250, 141, 205, 84, 104, 175, 49, 59, 161, 58, 250, 178, 59, 219, 34, 1, 81, 12, 128, 142, 182, 22], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 109, 123, 24, 86, 30, 19, 159, 228, 49, 200, 33, 139, 70, 205, 244, 47, 40, 190, 110, 43, 138, 12, 26, 121, 5, 219, 103, 1, 110, 91, 142, 123], &[11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 99, 128, 64, 70, 131, 252, 137, 232, 35, 52, 80, 200, 170, 25, 80, 159, 230, 50, 156, 192, 179, 158, 9, 52, 58, 115, 101, 115, 115, 105, 111, 110, 58, 107, 101, 121, 115, 238, 43, 102, 225, 142, 104, 190, 12, 183, 86, 115, 25, 37, 223, 229, 65, 253, 167, 152, 54, 57, 147, 167, 137, 80, 161, 2, 235, 195, 174, 40, 244], &[21, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 114, 118, 56, 0, 163, 106, 153, 253, 252, 124, 16, 246, 65, 95, 110, 230], &[8, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 136, 220, 222, 147, 76, 101, 130, 39, 238, 29, 250, 252, 214, 225, 105, 3], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 148, 
80, 191, 164, 185, 106, 63, 167, 163, 200, 244, 13, 166, 191, 50, 225], &[0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 17, 218, 109, 31, 118, 29, 223, 155, 219, 76, 157, 110, 83, 3, 235, 212, 31, 97, 133, 141, 10, 86, 71, 161, 167, 191, 224, 137, 191, 146, 27, 233], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 38, 160, 142, 77, 12, 81, 144, 240, 24, 113, 224, 86, 155, 98, 144, 184, 103, 96, 8, 93, 153, 241, 126, 180, 231, 230, 181, 143, 235, 141, 98, 73], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 47, 1, 107, 122, 93, 185, 48, 218, 189, 234, 3, 170, 104, 210, 115, 77, 47, 164, 122, 5, 87, 226, 13, 19, 12, 193, 224, 68, 248, 220, 87, 150], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 91, 143, 41, 219, 118, 207, 78, 103, 110, 79, 201, 177, 112, 64, 49, 45, 235, 237, 175, 205, 86, 55, 251, 60, 123, 173, 210, 205, 220, 230, 164, 69], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 123, 10, 161, 115, 94, 91, 165, 141, 50, 54, 49, 108, 103, 31, 228, 240, 14, 211, 102, 238, 114, 65, 124, 158, 208, 42, 83, 168, 1, 158, 133, 184], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 3, 159, 247, 202, 161, 124, 206, 191, 202, 220, 68, 189, 159, 206, 106, 75, 102, 153, 196, 208, 61, 226, 227, 52, 154, 161, 220, 17, 25, 60, 215], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 140, 53, 210, 47, 69, 157, 119, 202, 76, 11, 11, 80, 53, 134, 151, 102, 214, 13, 24, 43, 151, 22, 171, 62, 136, 121, 224, 102, 71, 136, 153, 168], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 157, 255, 135, 106, 75, 148, 45, 10, 151, 17, 209, 130, 33, 137, 143, 17, 202, 57, 117, 21, 137, 235, 244, 212, 157, 116, 159, 107, 
62, 73, 50, 146], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 225, 44, 34, 212, 241, 98, 217, 160, 18, 201, 49, 146, 51, 218, 93, 62, 146, 60, 197, 225, 2, 155, 143, 144, 228, 114, 73, 201, 171, 37, 107, 53], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 152, 239, 125, 192, 96, 67, 110, 78, 216, 3, 175, 7, 99, 43, 137, 182, 244, 170, 194, 251, 227, 63, 3, 85, 75, 254, 181, 89, 234, 38, 144, 237, 133, 33, 202, 164, 190, 150, 30, 97, 201, 26, 201, 161, 83, 13, 206, 122], &[44, 175, 112, 32, 141, 187, 198, 225, 208, 96, 126, 2, 8, 79, 212, 67, 222, 67, 84, 75, 213, 91, 72, 122, 231, 108, 125, 176, 42, 11, 117, 78, 2, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 224, 205, 208, 98, 230, 234, 242, 66, 149, 173, 76, 207, 196, 29, 70, 9], &[8, 21, 0, 0, 0, 0, 0, 0, 0, 21, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0, 11, 0, 0, 0, 0, 0, 0, 0]), (&[206, 197, 7, 13, 96, 157, 211, 73, 127, 114, 189, 224, 127, 201, 107, 160, 255, 58, 225, 39, 112, 190, 162, 228, 141, 155, 222, 115, 133, 231, 162, 95], &[0, 0, 0, 0, 10, 0, 0, 0]), (&[240, 195, 101, 195, 207, 89, 214, 113, 235, 114, 218, 14, 122, 65, 19, 196, 159, 31, 5, 21, 244, 98, 205, 207, 132, 224, 241, 214, 4, 93, 252, 187], &[64, 31, 0, 0, 0, 0, 0, 0])]; diff --git a/frame/staking/src/migration/tests.rs b/frame/staking/src/migration/tests.rs new file mode 100644 index 0000000000000000000000000000000000000000..d1ba35cadcead35f1252fa891f30e950b6fe3f02 --- /dev/null +++ b/frame/staking/src/migration/tests.rs @@ -0,0 +1,220 @@ +use crate::*; +use crate::mock::*; +use frame_support::storage::migration::*; +use sp_core::hashing::blake2_256; +use super::test_upgrade_from_master_dataset; +use sp_runtime::traits::OnRuntimeUpgrade; + +#[test] +fn upgrade_works() { + ExtBuilder::default().build().execute_with(|| { + start_era(3); + + assert_eq!(Session::validators(), vec![21, 11]); + + // Insert fake data to check the migration + put_storage_value::>(b"Staking", b"CurrentElected", b"", vec![21, 31]); + put_storage_value::(b"Staking", b"CurrentEraStartSessionIndex", b"", 5); + put_storage_value::>(b"Staking", b"CurrentEraStart", b"", 777); + put_storage_value( + b"Staking", b"Stakers", &blake2_256(&11u64.encode()), + Exposure:: { + total: 10, + own: 10, + others: vec![], + } + ); + put_storage_value( + b"Staking", b"Stakers", &blake2_256(&21u64.encode()), + Exposure:: { + total: 20, + own: 20, + others: vec![], + } + ); + put_storage_value( + b"Staking", b"Stakers", &blake2_256(&31u64.encode()), + Exposure:: { + total: 30, + own: 30, + others: vec![], + } + ); + put_storage_value::<(u32, Vec)>(b"Staking", b"CurrentEraPointsEarned", b"", (12, vec![2, 10])); + ::ErasStakers::remove_all(); + ::ErasStakersClipped::remove_all(); + + ::StorageVersion::put(Releases::V1_0_0); + + // Perform upgrade + Staking::on_runtime_upgrade(); + + assert_eq!(::StorageVersion::get(), Releases::V2_0_0); + + // Check migration + assert_eq!(::ErasStartSessionIndex::get(3).unwrap(), 5); + assert_eq!(::ErasRewardPoints::get(3), EraRewardPoints { + total: 12, + individual: vec![(21, 2), (31, 
10)].into_iter().collect(), + }); + assert_eq!(::ActiveEra::get().unwrap().index, 3); + assert_eq!(::ActiveEra::get().unwrap().start, Some(777)); + assert_eq!(::CurrentEra::get().unwrap(), 3); + assert_eq!(::ErasStakers::get(3, 11), Exposure { + total: 0, + own: 0, + others: vec![], + }); + assert_eq!(::ErasStakers::get(3, 21), Exposure { + total: 20, + own: 20, + others: vec![], + }); + assert_eq!(::ErasStakers::get(3, 31), Exposure { + total: 30, + own: 30, + others: vec![], + }); + assert_eq!(::ErasStakersClipped::get(3, 11), Exposure { + total: 0, + own: 0, + others: vec![], + }); + assert_eq!(::ErasStakersClipped::get(3, 21), Exposure { + total: 20, + own: 20, + others: vec![], + }); + assert_eq!(::ErasStakersClipped::get(3, 31), Exposure { + total: 30, + own: 30, + others: vec![], + }); + assert_eq!(::ErasValidatorPrefs::get(3, 21), Staking::validators(21)); + assert_eq!(::ErasValidatorPrefs::get(3, 31), Staking::validators(31)); + assert_eq!(::ErasTotalStake::get(3), 50); + }) +} + +// Test that an upgrade from previous test environment works. +#[test] +fn test_upgrade_from_master_works() { + let data_sets = &[ + test_upgrade_from_master_dataset::_0, + test_upgrade_from_master_dataset::_1, + test_upgrade_from_master_dataset::_2, + test_upgrade_from_master_dataset::_3, + test_upgrade_from_master_dataset::_4, + test_upgrade_from_master_dataset::_5, + test_upgrade_from_master_dataset::_6, + test_upgrade_from_master_dataset::_7, + test_upgrade_from_master_dataset::_8, + ]; + for data_set in data_sets.iter() { + let mut storage = sp_runtime::Storage::default(); + for (key, value) in data_set.iter() { + storage.top.insert(key.to_vec(), value.to_vec()); + } + let mut ext = sp_io::TestExternalities::from(storage); + ext.execute_with(|| { + let old_stakers = + get_storage_value::>(b"Staking", b"CurrentElected", b"").unwrap(); + let old_staker_0 = old_stakers[0]; + let old_staker_1 = old_stakers[1]; + let old_current_era = + get_storage_value::(b"Staking", b"CurrentEra", b"").unwrap(); + let old_staker_0_exposure = get_storage_value::>( + b"Staking", b"Stakers", &blake2_256(&old_staker_0.encode()) + ).unwrap(); + let old_staker_1_exposure = get_storage_value::>( + b"Staking", b"Stakers", &blake2_256(&old_staker_1.encode()) + ).unwrap(); + let ( + old_era_points_earned_total, + old_era_points_earned_individual + ) = get_storage_value::<(u32, Vec)>(b"Staking", b"CurrentEraPointsEarned", b"") + .unwrap_or((0, vec![])); + + Staking::on_runtime_upgrade(); + assert!(::StorageVersion::get() == Releases::V2_0_0); + + // Check ActiveEra and CurrentEra + let active_era = Staking::active_era().unwrap().index; + let current_era = Staking::current_era().unwrap(); + assert!(current_era == active_era); + assert!(current_era == old_current_era); + + // Check ErasStartSessionIndex + let active_era_start = Staking::eras_start_session_index(active_era).unwrap(); + let current_era_start = Staking::eras_start_session_index(current_era).unwrap(); + let current_session_index = Session::current_index(); + assert!(current_era_start == active_era_start); + assert!(active_era_start <= current_session_index); + assert_eq!(::ErasStartSessionIndex::iter().count(), 1); + + // Check ErasStakers + assert_eq!(::ErasStakers::iter().count(), 2); + assert_eq!( + ::ErasStakers::get(current_era, old_staker_0), + old_staker_0_exposure + ); + assert_eq!( + ::ErasStakers::get(current_era, old_staker_1), + old_staker_1_exposure + ); + + // Check ErasStakersClipped + assert_eq!(::ErasStakersClipped::iter().count(), 2); + 
assert!(::ErasStakersClipped::iter().all(|exposure_clipped| { + let max = ::MaxNominatorRewardedPerValidator::get() as usize; + exposure_clipped.others.len() <= max + })); + assert_eq!( + ::ErasStakersClipped::get(current_era, old_staker_0), + old_staker_0_exposure + ); + assert_eq!( + ::ErasStakersClipped::get(current_era, old_staker_1), + old_staker_1_exposure + ); + + // Check ErasValidatorPrefs + assert_eq!(::ErasValidatorPrefs::iter().count(), 2); + assert_eq!( + ::ErasValidatorPrefs::get(current_era, old_staker_0), + Staking::validators(old_staker_0) + ); + assert_eq!( + ::ErasValidatorPrefs::get(current_era, old_staker_1), + Staking::validators(old_staker_1) + ); + + // Check ErasTotalStake + assert_eq!(::ErasTotalStake::iter().count(), 1); + assert_eq!( + ::ErasTotalStake::get(current_era), + old_staker_0_exposure.total + old_staker_1_exposure.total + ); + + // Check ErasRewardPoints + assert_eq!(::ErasRewardPoints::iter().count(), 1); + let mut individual = BTreeMap::new(); + if let Some(p) = old_era_points_earned_individual.get(0) { + individual.insert(old_staker_0, p.clone()); + } + if let Some(p) = old_era_points_earned_individual.get(1) { + individual.insert(old_staker_1, p.clone()); + } + assert_eq!( + ::ErasRewardPoints::get(current_era), + EraRewardPoints { + total: old_era_points_earned_total, + individual, + } + ); + + // Check ErasValidatorReward + assert_eq!(::ErasValidatorReward::iter().count(), 0); + }); + } +} diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index c5f3ef250803a3d18c8f6649507b41335c3feede..b7cae91bedfa5262748ead3f1974d8f9e4a36c34 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -16,22 +16,23 @@ //! Test utilities -use std::{collections::HashSet, cell::RefCell}; +use std::{collections::{HashSet, HashMap}, cell::RefCell}; use sp_runtime::{Perbill, KeyTypeId}; use sp_runtime::curve::PiecewiseLinear; -use sp_runtime::traits::{IdentityLookup, Convert, OpaqueKeys, OnInitialize, SaturatedConversion}; +use sp_runtime::traits::{IdentityLookup, Convert, OpaqueKeys, OnInitialize, OnFinalize, SaturatedConversion}; use sp_runtime::testing::{Header, UintAuthorityId}; use sp_staking::{SessionIndex, offence::{OffenceDetails, OnOffenceHandler}}; use sp_core::{H256, crypto::key_types}; use sp_io; use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, StorageLinkedMap, StorageValue, + assert_ok, impl_outer_origin, parameter_types, StorageLinkedMap, StorageValue, StorageMap, + StorageDoubleMap, traits::{Currency, Get, FindAuthor}, weights::Weight, }; use crate::{ EraIndex, GenesisConfig, Module, Trait, StakerStatus, ValidatorPrefs, RewardDestination, - Nominators, inflation + Nominators, inflation, SessionInterface, Exposure, ErasStakers, ErasRewardPoints }; /// The AccountId alias in this test module. @@ -140,12 +141,12 @@ impl frame_system::Trait for Test { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = (Balances, Staking, Session); + type OnKilledAccount = (); } impl pallet_balances::Trait for Test { type Balance = Balance; - type Event = (); type DustRemoval = (); + type Event = (); type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; } @@ -156,13 +157,13 @@ parameter_types! 
{ pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(25); } impl pallet_session::Trait for Test { - type SessionManager = pallet_session::historical::NoteHistoricalRoot; - type Keys = UintAuthorityId; - type ShouldEndSession = pallet_session::PeriodicSessions; - type SessionHandler = TestSessionHandler; type Event = (); type ValidatorId = AccountId; type ValidatorIdOf = crate::StashOf; + type ShouldEndSession = pallet_session::PeriodicSessions; + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = TestSessionHandler; + type Keys = UintAuthorityId; type DisabledValidatorsThreshold = DisabledValidatorsThreshold; } @@ -198,6 +199,7 @@ parameter_types! { pub const SessionsPerEra: SessionIndex = 3; pub const BondingDuration: EraIndex = 3; pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; + pub const MaxNominatorRewardedPerValidator: u32 = 64; } impl Trait for Test { type Currency = pallet_balances::Module; @@ -213,6 +215,7 @@ impl Trait for Test { type BondingDuration = BondingDuration; type SessionInterface = Self; type RewardCurve = RewardCurve; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; } pub struct ExtBuilder { @@ -300,22 +303,22 @@ impl ExtBuilder { let _ = pallet_balances::GenesisConfig::{ balances: vec![ - (1, 10 * balance_factor), - (2, 20 * balance_factor), - (3, 300 * balance_factor), - (4, 400 * balance_factor), - (10, balance_factor), - (11, balance_factor * 1000), - (20, balance_factor), - (21, balance_factor * 2000), - (30, balance_factor), - (31, balance_factor * 2000), - (40, balance_factor), - (41, balance_factor * 2000), - (100, 2000 * balance_factor), - (101, 2000 * balance_factor), - // This allow us to have a total_payout different from 0. - (999, 1_000_000_000_000), + (1, 10 * balance_factor), + (2, 20 * balance_factor), + (3, 300 * balance_factor), + (4, 400 * balance_factor), + (10, balance_factor), + (11, balance_factor * 1000), + (20, balance_factor), + (21, balance_factor * 2000), + (30, balance_factor), + (31, balance_factor * 2000), + (40, balance_factor), + (41, balance_factor * 2000), + (100, 2000 * balance_factor), + (101, 2000 * balance_factor), + // This allow us to have a total_payout different from 0. 
+ (999, 1_000_000_000_000), ], }.assimilate_storage(&mut storage); @@ -328,7 +331,6 @@ impl ExtBuilder { }; let nominated = if self.nominate { vec![11, 21] } else { vec![] }; let _ = GenesisConfig::{ - current_era: 0, stakers: vec![ // (stash, controller, staked_amount, status) (11, 10, balance_factor * 1000, StakerStatus::::Validator), @@ -346,7 +348,7 @@ impl ExtBuilder { }.assimilate_storage(&mut storage); let _ = pallet_session::GenesisConfig:: { - keys: validators.iter().map(|x| (*x, UintAuthorityId(*x))).collect(), + keys: validators.iter().map(|x| (*x, *x, UintAuthorityId(*x))).collect(), }.assimilate_storage(&mut storage); let mut ext = sp_io::TestExternalities::from(storage); @@ -366,35 +368,34 @@ pub type Session = pallet_session::Module; pub type Timestamp = pallet_timestamp::Module; pub type Staking = Module; -pub fn check_exposure_all() { - Staking::current_elected().into_iter().for_each(|acc| check_exposure(acc)); +pub fn check_exposure_all(era: EraIndex) { + ErasStakers::::iter_prefix(era).for_each(check_exposure) } -pub fn check_nominator_all() { - >::enumerate().for_each(|(acc, _)| check_nominator_exposure(acc)); +pub fn check_nominator_all(era: EraIndex) { + >::enumerate() + .for_each(|(acc, _)| check_nominator_exposure(era, acc)); } /// Check for each selected validator: expo.total = Sum(expo.other) + expo.own -pub fn check_exposure(stash: u64) { - assert_is_stash(stash); - let expo = Staking::stakers(&stash); +pub fn check_exposure(expo: Exposure) { assert_eq!( expo.total as u128, expo.own as u128 + expo.others.iter().map(|e| e.value as u128).sum::(), - "wrong total exposure for {:?}: {:?}", stash, expo, + "wrong total exposure {:?}", expo, ); } /// Check that for each nominator: slashable_balance > sum(used_balance) /// Note: we might not consume all of a nominator's balance, but we MUST NOT over spend it. -pub fn check_nominator_exposure(stash: u64) { +pub fn check_nominator_exposure(era: EraIndex, stash: AccountId) { assert_is_stash(stash); let mut sum = 0; - Staking::current_elected() - .iter() - .map(|v| Staking::stakers(v)) - .for_each(|e| e.others.iter() - .filter(|i| i.who == stash) - .for_each(|i| sum += i.value)); + ErasStakers::::iter_prefix(era) + .for_each(|exposure| { + exposure.others.iter() + .filter(|i| i.who == stash) + .for_each(|i| sum += i.value) + }); let nominator_stake = Staking::slashable_balance_of(&stash); // a nominator cannot over-spend. 
 	assert!(
@@ -403,11 +404,11 @@ pub fn check_nominator_exposure(stash: u64) {
 	);
 }
 
-pub fn assert_is_stash(acc: u64) {
+pub fn assert_is_stash(acc: AccountId) {
 	assert!(Staking::bonded(&acc).is_some(), "Not a stash.");
 }
 
-pub fn assert_ledger_consistent(stash: u64) {
+pub fn assert_ledger_consistent(stash: AccountId) {
 	assert_is_stash(stash);
 	let ledger = Staking::ledger(stash - 1).unwrap();
 
@@ -437,9 +438,8 @@ pub fn advance_session() {
 }
 
 pub fn start_session(session_index: SessionIndex) {
-	// Compensate for session delay
-	let session_index = session_index + 1;
 	for i in Session::current_index()..session_index {
+		Staking::on_finalize(System::block_number());
 		System::set_block_number((i + 1).into());
 		Timestamp::set_timestamp(System::block_number() * 1000);
 		Session::on_initialize(System::block_number());
@@ -450,22 +450,21 @@ pub fn start_session(session_index: SessionIndex) {
 
 pub fn start_era(era_index: EraIndex) {
 	start_session((era_index * 3).into());
-	assert_eq!(Staking::current_era(), era_index);
+	assert_eq!(Staking::active_era().unwrap().index, era_index);
 }
 
 pub fn current_total_payout_for_duration(duration: u64) -> u64 {
 	inflation::compute_total_payout(
 		::RewardCurve::get(),
-		>::slot_stake() * 2,
+		Staking::eras_total_stake(Staking::active_era().unwrap().index),
 		Balances::total_issuance(),
 		duration,
 	).0
 }
 
 pub fn reward_all_elected() {
-	let rewards = >::current_elected().iter()
-		.map(|v| (*v, 1))
-		.collect::>();
+	let rewards = ::SessionInterface::validators().into_iter()
+		.map(|v| (v, 1));
 
 	>::reward_by_ids(rewards)
 }
 
@@ -489,8 +488,8 @@ pub fn on_offence_in_era(
 		}
 	}
 
-	if Staking::current_era() == era {
-		Staking::on_offence(offenders, slash_fraction, Staking::current_era_start_session_index());
+	if Staking::active_era().unwrap().index == era {
+		Staking::on_offence(offenders, slash_fraction, Staking::eras_start_session_index(era).unwrap());
 	} else {
 		panic!("cannot slash in era {}", era);
 	}
@@ -500,6 +499,38 @@ pub fn on_offence_now(
 	offenders: &[OffenceDetails>],
 	slash_fraction: &[Perbill],
 ) {
-	let now = Staking::current_era();
+	let now = Staking::active_era().unwrap().index;
 	on_offence_in_era(offenders, slash_fraction, now)
 }
+
+/// Make all validator and nominator request their payment
+pub fn make_all_reward_payment(era: EraIndex) {
+	let validators_with_reward = ErasRewardPoints::::get(era).individual.keys()
+		.cloned()
+		.collect::>();
+
+	// reward nominators
+	let mut nominator_controllers = HashMap::new();
+	for validator in Staking::eras_reward_points(era).individual.keys() {
+		let validator_exposure = Staking::eras_stakers_clipped(era, validator);
+		for (nom_index, nom) in validator_exposure.others.iter().enumerate() {
+			if let Some(nom_ctrl) = Staking::bonded(nom.who) {
+				nominator_controllers.entry(nom_ctrl)
+					.or_insert(vec![])
+					.push((validator.clone(), nom_index as u32));
+			}
+		}
+	}
+	for (nominator_controller, validators_with_nom_index) in nominator_controllers {
+		assert_ok!(Staking::payout_nominator(
+			Origin::signed(nominator_controller),
+			era,
+			validators_with_nom_index,
+		));
+	}
+
+	// reward validators
+	for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) {
+		assert_ok!(Staking::payout_validator(Origin::signed(validator_controller), era));
+	}
+}
diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs
index 73215d611777696087650395d4da5a9d70db9fe0..507b5591d5f873527e5d6c10e558fc4c76553150 100644
--- a/frame/staking/src/tests.rs
+++ b/frame/staking/src/tests.rs
@@ -23,9 +23,11 @@
 use sp_staking::offence::OffenceDetails;
 use frame_support::{
 	assert_ok, assert_noop, traits::{Currency, ReservableCurrency},
-	dispatch::DispatchError, StorageMap,
+	StorageMap,
 };
+use pallet_balances::Error as BalancesError;
 use substrate_test_utils::assert_eq_uvec;
+use crate::Store;
 
 #[test]
 fn force_unstake_works() {
@@ -36,11 +38,7 @@ fn force_unstake_works() {
 		// Cant transfer
 		assert_noop!(
 			Balances::transfer(Origin::signed(11), 1, 10),
-			DispatchError::Module {
-				index: 0,
-				error: 1,
-				message: Some("LiquidityRestrictions"),
-			}
+			BalancesError::::LiquidityRestrictions
 		);
 		// Force unstake requires root.
 		assert_noop!(Staking::force_unstake(Origin::signed(11), 11), BadOrigin);
@@ -67,12 +65,12 @@ fn basic_setup_works() {
 		// Account 10 controls the stash from account 11, which is 100 * balance_factor units
 		assert_eq!(
 			Staking::ledger(&10),
-			Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![] })
+			Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], last_reward: None })
 		);
 		// Account 20 controls the stash from account 21, which is 200 * balance_factor units
 		assert_eq!(
 			Staking::ledger(&20),
-			Some(StakingLedger { stash: 21, total: 1000, active: 1000, unlocking: vec![] })
+			Some(StakingLedger { stash: 21, total: 1000, active: 1000, unlocking: vec![], last_reward: None })
 		);
 		// Account 1 does not control any stash
 		assert_eq!(Staking::ledger(&1), None);
@@ -86,27 +84,35 @@ fn basic_setup_works() {
 		assert_eq!(
 			Staking::ledger(100),
-			Some(StakingLedger { stash: 101, total: 500, active: 500, unlocking: vec![] })
+			Some(StakingLedger { stash: 101, total: 500, active: 500, unlocking: vec![], last_reward: None })
 		);
 		assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]);
 
 		assert_eq!(
-			Staking::stakers(11),
-			Exposure { total: 1125, own: 1000, others: vec![ IndividualExposure { who: 101, value: 125 }] }
+			Staking::eras_stakers(Staking::active_era().unwrap().index, 11),
+			Exposure {
+				total: 1125,
+				own: 1000,
+				others: vec![ IndividualExposure { who: 101, value: 125 }]
+			},
 		);
 		assert_eq!(
-			Staking::stakers(21),
-			Exposure { total: 1375, own: 1000, others: vec![ IndividualExposure { who: 101, value: 375 }] }
+			Staking::eras_stakers(Staking::active_era().unwrap().index, 21),
+			Exposure {
+				total: 1375,
+				own: 1000,
+				others: vec![ IndividualExposure { who: 101, value: 375 }]
+			},
 		);
 
 		// initial slot_stake
-		assert_eq!(Staking::slot_stake(), 1125);
+		assert_eq!(Staking::eras_total_stake(Staking::active_era().unwrap().index), 2500);
 
 		// The number of validators required.
 		assert_eq!(Staking::validator_count(), 2);
 
 		// Initial Era and session
-		assert_eq!(Staking::current_era(), 0);
+		assert_eq!(Staking::active_era().unwrap().index, 0);
 
 		// Account 10 has `balance_factor` free balance
 		assert_eq!(Balances::free_balance(10), 1);
@@ -116,8 +122,8 @@ fn basic_setup_works() {
 		assert_eq!(Staking::force_era(), Forcing::NotForcing);
 
 		// All exposures must be correct.
-		check_exposure_all();
-		check_nominator_all();
+		check_exposure_all(Staking::active_era().unwrap().index);
+		check_nominator_all(Staking::active_era().unwrap().index);
 	});
 }
 
@@ -126,10 +132,10 @@ fn change_controller_works() {
 	ExtBuilder::default().build().execute_with(|| {
 		assert_eq!(Staking::bonded(&11), Some(10));
 
-		assert!(>::enumerate().map(|(c, _)| c).collect::>().contains(&11));
+		assert!(Session::validators().contains(&11));
 		// 10 can control 11 who is initially a validator.
assert_ok!(Staking::chill(Origin::signed(10))); - assert!(!>::enumerate().map(|(c, _)| c).collect::>().contains(&11)); + assert!(Session::validators().contains(&11)); assert_ok!(Staking::set_controller(Origin::signed(11), 5)); @@ -149,114 +155,87 @@ fn rewards_should_work() { // * rewards get recorded per session // * rewards get paid per Era // * Check that nominators are also rewarded - ExtBuilder::default().nominate(false).build().execute_with(|| { - // Init some balances - let _ = Balances::make_free_balance_be(&2, 500); - - let delay = 1000; - let init_balance_2 = Balances::total_balance(&2); + ExtBuilder::default().nominate(true).build().execute_with(|| { let init_balance_10 = Balances::total_balance(&10); let init_balance_11 = Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); - // Set payee to controller - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - - // Initial config should be correct - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 0); - - // Add a dummy nominator. - // - // Equal division indicates that the reward will be equally divided among validator and - // nominator. - >::insert(&11, Exposure { - own: 500, - total: 1000, - others: vec![IndividualExposure {who: 2, value: 500 }] - }); - - >::insert(&2, RewardDestination::Stash); - assert_eq!(Staking::payee(2), RewardDestination::Stash); - assert_eq!(Staking::payee(11), RewardDestination::Controller); + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); - let mut block = 3; // Block 3 => Session 1 => Era 0 - System::set_block_number(block); - Timestamp::set_timestamp(block * 5000); // on time. - Session::on_initialize(System::block_number()); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 1); >::reward_by_ids(vec![(11, 50)]); >::reward_by_ids(vec![(11, 50)]); // This is the second validator of the current elected set. >::reward_by_ids(vec![(21, 50)]); - // This must be no-op as it is not an elected validator. - >::reward_by_ids(vec![(1001, 10_000)]); // Compute total payout now for whole duration as other parameter won't change - let total_payout = current_total_payout_for_duration(9 * 5 * 1000); - assert!(total_payout > 10); // Test is meaningful if reward something - - // No reward yet - assert_eq!(Balances::total_balance(&2), init_balance_2); - assert_eq!(Balances::total_balance(&10), init_balance_10); - assert_eq!(Balances::total_balance(&11), init_balance_11); - - block = 6; // Block 6 => Session 2 => Era 0 - System::set_block_number(block); - Timestamp::set_timestamp(block * 5000 + delay); // a little late. - Session::on_initialize(System::block_number()); - assert_eq!(Staking::current_era(), 0); - assert_eq!(Session::current_index(), 2); + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something - block = 9; // Block 9 => Session 3 => Era 1 - System::set_block_number(block); - Timestamp::set_timestamp(block * 5000); // back to being on time. 
no delays - Session::on_initialize(System::block_number()); - assert_eq!(Staking::current_era(), 1); - assert_eq!(Session::current_index(), 3); + start_session(1); - // 11 validator has 2/3 of the total rewards and half half for it and its nominator - assert_eq_error_rate!(Balances::total_balance(&2), init_balance_2 + total_payout / 3, 1); - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + total_payout / 3, 1); + assert_eq!(Balances::total_balance(&10), init_balance_10); assert_eq!(Balances::total_balance(&11), init_balance_11); - }); -} - -#[test] -fn multi_era_reward_should_work() { - // Should check that: - // The value of current_session_reward is set at the end of each era, based on - // slot_stake and session_reward. - ExtBuilder::default().nominate(false).build().execute_with(|| { - let init_balance_10 = Balances::total_balance(&10); - - // Set payee to controller - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - - // Compute now as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { + total: 50*3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + }); + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); + let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); - start_session(0); - start_session(1); start_session(2); start_session(3); - assert_eq!(Staking::current_era(), 1); - assert_eq!(Balances::total_balance(&10), init_balance_10 + total_payout_0); + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment(0); + + assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * total_payout_0*2/3, 2); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2/3 + + part_for_100_from_20 * total_payout_0 * 1/3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); - start_session(4); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + >::reward_by_ids(vec![(11, 1)]); - let total_payout_1 = current_total_payout_for_duration(3000); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3 * 1000); assert!(total_payout_1 > 10); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 101)]); - - // new era is triggered here. 
- start_session(5); - // pay time - assert_eq!(Balances::total_balance(&10), init_balance_10 + total_payout_0 + total_payout_1); + start_era(2); + mock::make_all_reward_payment(1); + + assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1/3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); }); } @@ -271,6 +250,9 @@ fn staking_should_work() { .fair(false) // to give 20 more staked value .build() .execute_with(|| { + // --- Block 1: + start_session(1); + Timestamp::set_timestamp(1); // Initialize time. // remember + compare this along with the test. @@ -279,55 +261,64 @@ fn staking_should_work() { // put some money in account that we'll use. for i in 1..5 { let _ = Balances::make_free_balance_be(&i, 2000); } - // --- Block 1: - start_session(1); + // --- Block 2: + start_session(2); // add a new candidate for being a validator. account 3 controlled by 4. assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); + let current_era_at_bond = Staking::current_era(); assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); // No effects will be seen so far. assert_eq_uvec!(validator_controllers(), vec![20, 10]); - // --- Block 2: - start_session(2); + // --- Block 3: + start_session(3); // No effects will be seen so far. Era has not been yet triggered. assert_eq_uvec!(validator_controllers(), vec![20, 10]); - // --- Block 3: the validators will now be queued. - start_session(3); - assert_eq!(Staking::current_era(), 1); - - // --- Block 4: the validators will now be changed. + // --- Block 4: the validators will now be queued. start_session(4); + assert_eq!(Staking::active_era().unwrap().index, 1); + + // --- Block 5: the validators are still in queue. + start_session(5); + + // --- Block 6: the validators will now be changed. + start_session(6); assert_eq_uvec!(validator_controllers(), vec![20, 4]); - // --- Block 4: Unstake 4 as a validator, freeing up the balance stashed in 3 + // --- Block 6: Unstake 4 as a validator, freeing up the balance stashed in 3 // 4 will chill Staking::chill(Origin::signed(4)).unwrap(); - // --- Block 5: nothing. 4 is still there. - start_session(5); + // --- Block 7: nothing. 4 is still there. + start_session(7); assert_eq_uvec!(validator_controllers(), vec![20, 4]); - // --- Block 6: 4 will not be a validator. - start_session(7); + // --- Block 8: + start_session(8); + + // --- Block 9: 4 will not be a validator. + start_session(9); assert_eq_uvec!(validator_controllers(), vec![20, 10]); // Note: the stashed value of 4 is still lock assert_eq!( Staking::ledger(&4), - Some(StakingLedger { stash: 3, total: 1500, active: 1500, unlocking: vec![] }) + Some(StakingLedger { + stash: 3, + total: 1500, + active: 1500, + unlocking: vec![], + last_reward: current_era_at_bond, + }) ); // e.g. 
it cannot spend more than 500 that it has free from the total 2000 assert_noop!( Balances::reserve(&3, 501), - DispatchError::Module { - index: 0, - error: 1, - message: Some("LiquidityRestrictions"), - } + BalancesError::::LiquidityRestrictions ); assert_ok!(Balances::reserve(&3, 409)); }); @@ -353,11 +344,12 @@ fn less_than_needed_candidates_works() { // But the exposure is updated in a simple way. No external votes exists. // This is purely self-vote. - assert_eq!(Staking::stakers(10).others.len(), 0); - assert_eq!(Staking::stakers(20).others.len(), 0); - assert_eq!(Staking::stakers(30).others.len(), 0); - check_exposure_all(); - check_nominator_all(); + assert!( + ErasStakers::::iter_prefix(Staking::active_era().unwrap().index) + .all(|exposure| exposure.others.is_empty()) + ); + check_exposure_all(Staking::active_era().unwrap().index); + check_nominator_all(Staking::active_era().unwrap().index); }); } @@ -379,6 +371,7 @@ fn no_candidate_emergency_condition() { // set the minimum validator count. ::MinimumValidatorCount::put(10); + // try to chill let _ = Staking::chill(Origin::signed(10)); // trigger era @@ -461,8 +454,6 @@ fn nominating_and_rewards_should_work() { assert!(total_payout_0 > 100); // Test is meaningful if reward something >::reward_by_ids(vec![(41, 1)]); >::reward_by_ids(vec![(31, 1)]); - >::reward_by_ids(vec![(21, 10)]); // must be no-op - >::reward_by_ids(vec![(11, 10)]); // must be no-op start_era(1); @@ -470,45 +461,40 @@ fn nominating_and_rewards_should_work() { assert_eq_uvec!(validator_controllers(), vec![20, 10]); // OLD validators must have already received some rewards. + mock::make_all_reward_payment(0); assert_eq!(Balances::total_balance(&40), 1 + total_payout_0 / 2); assert_eq!(Balances::total_balance(&30), 1 + total_payout_0 / 2); // ------ check the staked value of all parties. - // total expo of 10, with 1200 coming from nominators (externals), according to phragmen. - assert_eq!(Staking::stakers(11).own, 1000); - assert_eq!(Staking::stakers(11).total, 1000 + 800); - // 2 and 4 supported 10, each with stake 600, according to phragmen. + // 30 and 40 are not chosen anymore + assert_eq!(ErasStakers::::iter_prefix(Staking::active_era().unwrap().index).count(), 2); assert_eq!( - Staking::stakers(11).others.iter().map(|e| e.value).collect::>>(), - vec![400, 400] - ); - assert_eq!( - Staking::stakers(11).others.iter().map(|e| e.who).collect::>(), - vec![3, 1] - ); - // total expo of 20, with 500 coming from nominators (externals), according to phragmen. - assert_eq!(Staking::stakers(21).own, 1000); - assert_eq_error_rate!(Staking::stakers(21).total, 1000 + 1200, 2); - // 2 and 4 supported 20, each with stake 250, according to phragmen. 
- assert_eq!( - Staking::stakers(21).others.iter().map(|e| e.value).collect::>>(), - vec![600, 600] + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Exposure { + total: 1000 + 800, + own: 1000, + others: vec![ + IndividualExposure { who: 3, value: 400 }, + IndividualExposure { who: 1, value: 400 }, + ] + }, ); assert_eq!( - Staking::stakers(21).others.iter().map(|e| e.who).collect::>(), - vec![3, 1] + Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + Exposure { + total: 1000 + 1200, + own: 1000, + others: vec![ + IndividualExposure { who: 3, value: 600 }, + IndividualExposure { who: 1, value: 600 }, + ] + }, ); - // They are not chosen anymore - assert_eq!(Staking::stakers(31).total, 0); - assert_eq!(Staking::stakers(41).total, 0); - // the total reward for era 1 let total_payout_1 = current_total_payout_for_duration(3000); assert!(total_payout_1 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(41, 10)]); // must be no-op - >::reward_by_ids(vec![(31, 10)]); // must be no-op >::reward_by_ids(vec![(21, 2)]); >::reward_by_ids(vec![(11, 1)]); @@ -517,6 +503,7 @@ fn nominating_and_rewards_should_work() { // nothing else will happen, era ends and rewards are paid again, // it is expected that nominators will also be paid. See below + mock::make_all_reward_payment(1); let payout_for_10 = total_payout_1 / 3; let payout_for_20 = 2 * total_payout_1 / 3; // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 @@ -545,8 +532,8 @@ fn nominating_and_rewards_should_work() { 1, ); - check_exposure_all(); - check_nominator_all(); + check_exposure_all(Staking::active_era().unwrap().index); + check_nominator_all(Staking::active_era().unwrap().index); }); } @@ -589,13 +576,13 @@ fn nominators_also_get_slashed() { &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![], }], &[Perbill::from_percent(5)], ); - let expo = Staking::stakers(11); + let expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); let slash_value = 50; let total_slash = expo.total.min(slash_value); let validator_slash = expo.own.min(total_slash); @@ -604,8 +591,8 @@ fn nominators_also_get_slashed() { // initial + first era reward + slash assert_eq!(Balances::total_balance(&11), initial_balance - validator_slash); assert_eq!(Balances::total_balance(&2), initial_balance - nominator_slash); - check_exposure_all(); - check_nominator_all(); + check_exposure_all(Staking::active_era().unwrap().index); + check_nominator_all(Staking::active_era().unwrap().index); // Because slashing happened. assert!(is_disabled(10)); }); @@ -660,42 +647,52 @@ fn double_controlling_should_fail() { #[test] fn session_and_eras_work() { ExtBuilder::default().build().execute_with(|| { - assert_eq!(Staking::current_era(), 0); + assert_eq!(Staking::active_era().unwrap().index, 0); // Block 1: No change. - start_session(0); + start_session(1); assert_eq!(Session::current_index(), 1); - assert_eq!(Staking::current_era(), 0); + assert_eq!(Staking::active_era().unwrap().index, 0); - // Block 2: Simple era change. + // Block 2: No change. start_session(2); - assert_eq!(Session::current_index(), 3); - assert_eq!(Staking::current_era(), 1); + assert_eq!(Session::current_index(), 2); + assert_eq!(Staking::active_era().unwrap().index, 0); - // Block 3: Schedule an era length change; no visible changes. + // Block 3: Era increment. 
start_session(3); + assert_eq!(Session::current_index(), 3); + assert_eq!(Staking::active_era().unwrap().index, 1); + + // Block 4: No change. + start_session(4); assert_eq!(Session::current_index(), 4); - assert_eq!(Staking::current_era(), 1); + assert_eq!(Staking::active_era().unwrap().index, 1); - // Block 4: Era change kicks in. + // Block 5: No change. start_session(5); - assert_eq!(Session::current_index(), 6); - assert_eq!(Staking::current_era(), 2); + assert_eq!(Session::current_index(), 5); + assert_eq!(Staking::active_era().unwrap().index, 1); - // Block 5: No change. + // Block 6: Era increment. start_session(6); - assert_eq!(Session::current_index(), 7); - assert_eq!(Staking::current_era(), 2); + assert_eq!(Session::current_index(), 6); + assert_eq!(Staking::active_era().unwrap().index, 2); - // Block 6: No change. + // Block 7: No change. start_session(7); - assert_eq!(Session::current_index(), 8); - assert_eq!(Staking::current_era(), 2); + assert_eq!(Session::current_index(), 7); + assert_eq!(Staking::active_era().unwrap().index, 2); - // Block 7: Era increment. + // Block 8: No change. start_session(8); + assert_eq!(Session::current_index(), 8); + assert_eq!(Staking::active_era().unwrap().index, 2); + + // Block 9: Era increment. + start_session(9); assert_eq!(Session::current_index(), 9); - assert_eq!(Staking::current_era(), 3); + assert_eq!(Staking::active_era().unwrap().index, 3); }); } @@ -703,50 +700,53 @@ fn session_and_eras_work() { fn forcing_new_era_works() { ExtBuilder::default().build().execute_with(|| { // normal flow of session. - assert_eq!(Staking::current_era(), 0); + assert_eq!(Staking::active_era().unwrap().index, 0); start_session(0); - assert_eq!(Staking::current_era(), 0); + assert_eq!(Staking::active_era().unwrap().index, 0); start_session(1); - assert_eq!(Staking::current_era(), 0); + assert_eq!(Staking::active_era().unwrap().index, 0); start_session(2); - assert_eq!(Staking::current_era(), 1); + assert_eq!(Staking::active_era().unwrap().index, 0); + start_session(3); + assert_eq!(Staking::active_era().unwrap().index, 1); // no era change. ForceEra::put(Forcing::ForceNone); - start_session(3); - assert_eq!(Staking::current_era(), 1); start_session(4); - assert_eq!(Staking::current_era(), 1); + assert_eq!(Staking::active_era().unwrap().index, 1); start_session(5); - assert_eq!(Staking::current_era(), 1); + assert_eq!(Staking::active_era().unwrap().index, 1); start_session(6); - assert_eq!(Staking::current_era(), 1); + assert_eq!(Staking::active_era().unwrap().index, 1); + start_session(7); + assert_eq!(Staking::active_era().unwrap().index, 1); // back to normal. // this immediately starts a new session. 
ForceEra::put(Forcing::NotForcing); - start_session(7); - assert_eq!(Staking::current_era(), 2); start_session(8); - assert_eq!(Staking::current_era(), 2); + assert_eq!(Staking::active_era().unwrap().index, 1); // There is one session delay + start_session(9); + assert_eq!(Staking::active_era().unwrap().index, 2); // forceful change ForceEra::put(Forcing::ForceAlways); - start_session(9); - assert_eq!(Staking::current_era(), 3); start_session(10); - assert_eq!(Staking::current_era(), 4); + assert_eq!(Staking::active_era().unwrap().index, 2); // There is one session delay start_session(11); - assert_eq!(Staking::current_era(), 5); + assert_eq!(Staking::active_era().unwrap().index, 3); + start_session(12); + assert_eq!(Staking::active_era().unwrap().index, 4); // just one forceful change ForceEra::put(Forcing::ForceNew); - start_session(12); - assert_eq!(Staking::current_era(), 6); - - assert_eq!(ForceEra::get(), Forcing::NotForcing); start_session(13); - assert_eq!(Staking::current_era(), 6); + assert_eq!(Staking::active_era().unwrap().index, 5); + assert_eq!(ForceEra::get(), Forcing::NotForcing); + start_session(14); + assert_eq!(Staking::active_era().unwrap().index, 6); + start_session(15); + assert_eq!(Staking::active_era().unwrap().index, 6); }); } @@ -760,15 +760,11 @@ fn cannot_transfer_staked_balance() { // Confirm account 11 has some free balance assert_eq!(Balances::free_balance(11), 1000); // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::stakers(&11).total, 1000); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); // Confirm account 11 cannot transfer as a result assert_noop!( Balances::transfer(Origin::signed(11), 20, 1), - DispatchError::Module { - index: 0, - error: 1, - message: Some("LiquidityRestrictions"), - } + BalancesError::::LiquidityRestrictions ); // Give account 11 extra free balance @@ -789,15 +785,11 @@ fn cannot_transfer_staked_balance_2() { // Confirm account 21 has some free balance assert_eq!(Balances::free_balance(21), 2000); // Confirm account 21 (via controller 20) is totally staked - assert_eq!(Staking::stakers(&21).total, 1000); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 1000); // Confirm account 21 can transfer at most 1000 assert_noop!( Balances::transfer(Origin::signed(21), 20, 1001), - DispatchError::Module { - index: 0, - error: 1, - message: Some("LiquidityRestrictions"), - } + BalancesError::::LiquidityRestrictions ); assert_ok!(Balances::transfer(Origin::signed(21), 20, 1000)); }); @@ -812,15 +804,11 @@ fn cannot_reserve_staked_balance() { // Confirm account 11 has some free balance assert_eq!(Balances::free_balance(11), 1000); // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::stakers(&11).own, 1000); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).own, 1000); // Confirm account 11 cannot transfer as a result assert_noop!( Balances::reserve(&11, 1), - DispatchError::Module { - index: 0, - error: 1, - message: Some("LiquidityRestrictions"), - } + BalancesError::::LiquidityRestrictions ); // Give account 11 extra free balance @@ -835,7 +823,7 @@ fn reward_destination_works() { // Rewards go to the correct destination as determined in Payee ExtBuilder::default().nominate(false).build().execute_with(|| { // Check that account 11 is a validator - assert!(Staking::current_elected().contains(&11)); + assert!(Session::validators().contains(&11)); // Check the balance of the validator 
account assert_eq!(Balances::free_balance(10), 1); // Check the balance of the stash account @@ -846,6 +834,7 @@ fn reward_destination_works() { total: 1000, active: 1000, unlocking: vec![], + last_reward: None, })); // Compute total payout now for whole duration as other parameter won't change @@ -854,6 +843,7 @@ fn reward_destination_works() { >::reward_by_ids(vec![(11, 1)]); start_era(1); + mock::make_all_reward_payment(0); // Check that RewardDestination is Staked (default) assert_eq!(Staking::payee(&11), RewardDestination::Staked); @@ -865,6 +855,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: vec![], + last_reward: Some(0), })); //Change RewardDestination to Stash @@ -876,6 +867,7 @@ fn reward_destination_works() { >::reward_by_ids(vec![(11, 1)]); start_era(2); + mock::make_all_reward_payment(1); // Check that RewardDestination is Stash assert_eq!(Staking::payee(&11), RewardDestination::Stash); @@ -889,6 +881,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: vec![], + last_reward: Some(1), })); // Change RewardDestination to Controller @@ -903,6 +896,7 @@ fn reward_destination_works() { >::reward_by_ids(vec![(11, 1)]); start_era(3); + mock::make_all_reward_payment(2); // Check that RewardDestination is Controller assert_eq!(Staking::payee(&11), RewardDestination::Controller); @@ -914,6 +908,7 @@ fn reward_destination_works() { total: 1000 + total_payout_0, active: 1000 + total_payout_0, unlocking: vec![], + last_reward: Some(2), })); // Check that amount in staked account is NOT increased. assert_eq!(Balances::free_balance(11), recorded_stash_balance); @@ -926,45 +921,39 @@ fn validator_payment_prefs_work() { // Note: unstake threshold is being directly tested in slashing tests. // This test will focus on validator payment. ExtBuilder::default().build().execute_with(|| { - // Initial config - let stash_initial_balance = Balances::total_balance(&11); - - // check the balance of a validator accounts. - assert_eq!(Balances::total_balance(&10), 1); - // check the balance of a validator's stash accounts. - assert_eq!(Balances::total_balance(&11), stash_initial_balance); - // and the nominator (to-be) - let _ = Balances::make_free_balance_be(&2, 500); - - // add a dummy nominator. - >::insert(&11, Exposure { - own: 500, // equal division indicates that the reward will be equally divided among validator and nominator. - total: 1000, - others: vec![IndividualExposure {who: 2, value: 500 }] - }); - >::insert(&2, RewardDestination::Stash); + let commission = Perbill::from_percent(40); >::insert(&11, ValidatorPrefs { - commission: Perbill::from_percent(50), + commission: commission.clone(), }); + // Reward controller so staked ratio doesn't change. 
+ >::insert(&11, RewardDestination::Controller); + >::insert(&101, RewardDestination::Controller); + + start_era(1); + mock::make_all_reward_payment(0); + + let balance_era_1_10 = Balances::total_balance(&10); + let balance_era_1_100 = Balances::total_balance(&100); + // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something + let total_payout_1 = current_total_payout_for_duration(3000); + assert!(total_payout_1 > 100); // Test is meaningful if reward something + let exposure_1 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); >::reward_by_ids(vec![(11, 1)]); - start_era(1); + start_era(2); + mock::make_all_reward_payment(1); - // whats left to be shared is the sum of 3 rounds minus the validator's cut. - let shared_cut = total_payout_0 / 2; - // Validator's payee is Staked account, 11, reward will be paid here. - assert_eq!(Balances::total_balance(&11), stash_initial_balance + shared_cut / 2 + shared_cut); - // Controller account will not get any reward. - assert_eq!(Balances::total_balance(&10), 1); - // Rest of the reward will be shared and paid to the nominator in stake. - assert_eq!(Balances::total_balance(&2), 500 + shared_cut / 2); + let taken_cut = commission * total_payout_1; + let shared_cut = total_payout_1 - taken_cut; + let reward_of_10 = shared_cut * exposure_1.own / exposure_1.total + taken_cut; + let reward_of_100 = shared_cut * exposure_1.others[0].value / exposure_1.total; + assert_eq_error_rate!(Balances::total_balance(&10), balance_era_1_10 + reward_of_10, 2); + assert_eq_error_rate!(Balances::total_balance(&100), balance_era_1_100 + reward_of_100, 2); - check_exposure_all(); - check_nominator_all(); + check_exposure_all(Staking::active_era().unwrap().index); + check_nominator_all(Staking::active_era().unwrap().index); }); } @@ -985,6 +974,7 @@ fn bond_extra_works() { total: 1000, active: 1000, unlocking: vec![], + last_reward: None, })); // Give account 11 some large free balance greater than total @@ -998,6 +988,7 @@ fn bond_extra_works() { total: 1000 + 100, active: 1000 + 100, unlocking: vec![], + last_reward: None, })); // Call the bond_extra function with a large number, should handle it @@ -1008,6 +999,7 @@ fn bond_extra_works() { total: 1000000, active: 1000000, unlocking: vec![], + last_reward: None, })); }); } @@ -1027,7 +1019,7 @@ fn bond_extra_and_withdraw_unbonded_works() { let _ = Balances::make_free_balance_be(&11, 1000000); // Initial config should be correct - assert_eq!(Staking::current_era(), 0); + assert_eq!(Staking::active_era().unwrap().index, 0); assert_eq!(Session::current_index(), 0); // check the balance of a validator accounts. @@ -1042,8 +1034,9 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000, active: 1000, unlocking: vec![], + last_reward: None, })); - assert_eq!(Staking::stakers(&11), Exposure { total: 1000, own: 1000, others: vec![] }); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11), Exposure { total: 1000, own: 1000, others: vec![] }); // deposit the extra 100 units Staking::bond_extra(Origin::signed(11), 100).unwrap(); @@ -1053,14 +1046,15 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: vec![], + last_reward: None, })); // Exposure is a snapshot! only updated after the next era update. 
- assert_ne!(Staking::stakers(&11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); + assert_ne!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); // trigger next era. Timestamp::set_timestamp(10); start_era(2); - assert_eq!(Staking::current_era(), 2); + assert_eq!(Staking::active_era().unwrap().index, 2); // ledger should be the same. assert_eq!(Staking::ledger(&10), Some(StakingLedger { @@ -1068,20 +1062,21 @@ fn bond_extra_and_withdraw_unbonded_works() { total: 1000 + 100, active: 1000 + 100, unlocking: vec![], + last_reward: None, })); // Exposure is now updated. - assert_eq!(Staking::stakers(&11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); // Unbond almost all of the funds in stash. Staking::unbond(Origin::signed(10), 1000).unwrap(); assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}] }) + stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], last_reward: None }) ); // Attempting to free the balances now will fail. 2 eras need to pass. Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}] })); + stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], last_reward: None })); // trigger next era. start_era(3); @@ -1089,7 +1084,7 @@ fn bond_extra_and_withdraw_unbonded_works() { // nothing yet Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}] })); + stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], last_reward: None })); // trigger next era. start_era(5); @@ -1097,7 +1092,7 @@ fn bond_extra_and_withdraw_unbonded_works() { Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); // Now the value is free and the staking ledger is updated. assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 100, active: 100, unlocking: vec![] })); + stash: 11, total: 100, active: 100, unlocking: vec![], last_reward: None })); }) } @@ -1158,11 +1153,12 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: vec![], + last_reward: None, }) ); start_era(2); - assert_eq!(Staking::current_era(), 2); + assert_eq!(Staking::active_era().unwrap().index, 2); // Try to rebond some funds. We get an error since no fund is unbonded. 
assert_noop!( @@ -1180,8 +1176,9 @@ fn rebond_works() { active: 100, unlocking: vec![UnlockChunk { value: 900, - era: 2 + 3 - },] + era: 2 + 3, + }], + last_reward: None, }) ); @@ -1194,6 +1191,7 @@ fn rebond_works() { total: 1000, active: 1000, unlocking: vec![], + last_reward: None, }) ); @@ -1206,6 +1204,7 @@ fn rebond_works() { total: 1000, active: 100, unlocking: vec![UnlockChunk { value: 900, era: 5 }], + last_reward: None, }) ); @@ -1218,6 +1217,7 @@ fn rebond_works() { total: 1000, active: 600, unlocking: vec![UnlockChunk { value: 400, era: 5 }], + last_reward: None, }) ); @@ -1229,7 +1229,8 @@ fn rebond_works() { stash: 11, total: 1000, active: 1000, - unlocking: vec![] + unlocking: vec![], + last_reward: None, }) ); @@ -1247,7 +1248,8 @@ fn rebond_works() { UnlockChunk { value: 300, era: 5 }, UnlockChunk { value: 300, era: 5 }, UnlockChunk { value: 300, era: 5 }, - ] + ], + last_reward: None, }) ); @@ -1262,7 +1264,8 @@ fn rebond_works() { unlocking: vec![ UnlockChunk { value: 300, era: 5 }, UnlockChunk { value: 100, era: 5 }, - ] + ], + last_reward: None, }) ); }) @@ -1295,6 +1298,7 @@ fn rebond_is_fifo() { total: 1000, active: 1000, unlocking: vec![], + last_reward: None, }) ); @@ -1310,7 +1314,8 @@ fn rebond_is_fifo() { active: 600, unlocking: vec![ UnlockChunk { value: 400, era: 2 + 3 }, - ] + ], + last_reward: None, }) ); @@ -1327,7 +1332,8 @@ fn rebond_is_fifo() { unlocking: vec![ UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 300, era: 3 + 3 }, - ] + ], + last_reward: None, }) ); @@ -1345,7 +1351,8 @@ fn rebond_is_fifo() { UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 300, era: 3 + 3 }, UnlockChunk { value: 200, era: 4 + 3 }, - ] + ], + last_reward: None, }) ); @@ -1360,36 +1367,34 @@ fn rebond_is_fifo() { unlocking: vec![ UnlockChunk { value: 400, era: 2 + 3 }, UnlockChunk { value: 100, era: 3 + 3 }, - ] + ], + last_reward: None, }) ); }) } #[test] -fn slot_stake_is_least_staked_validator_and_exposure_defines_maximum_punishment() { - // Test that slot_stake is determined by the least staked validator - // Test that slot_stake is the maximum punishment that can happen to a validator +fn reward_to_stake_works() { ExtBuilder::default().nominate(false).fair(false).build().execute_with(|| { // Confirm validator count is 2 assert_eq!(Staking::validator_count(), 2); // Confirm account 10 and 20 are validators assert!(>::contains_key(&11) && >::contains_key(&21)); - assert_eq!(Staking::stakers(&11).total, 1000); - assert_eq!(Staking::stakers(&21).total, 2000); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 2000); // Give the man some money. 
let _ = Balances::make_free_balance_be(&10, 1000); let _ = Balances::make_free_balance_be(&20, 1000); - // We confirm initialized slot_stake is this value - assert_eq!(Staking::slot_stake(), Staking::stakers(&11).total); + // Bypass logic and change current exposure + ErasStakers::::insert(0, 21, Exposure { total: 69, own: 69, others: vec![] }); // Now lets lower account 20 stake - >::insert(&21, Exposure { total: 69, own: 69, others: vec![] }); - assert_eq!(Staking::stakers(&21).total, 69); - >::insert(&20, StakingLedger { stash: 22, total: 69, active: 69, unlocking: vec![] }); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69); + >::insert(&20, StakingLedger { stash: 21, total: 69, active: 69, unlocking: vec![], last_reward: None }); // Compute total payout now for whole duration as other parameter won't change let total_payout_0 = current_total_payout_for_duration(3000); @@ -1399,19 +1404,23 @@ fn slot_stake_is_least_staked_validator_and_exposure_defines_maximum_punishment( // New era --> rewards are paid --> stakes are changed start_era(1); + mock::make_all_reward_payment(0); - // -- new balances + reward - assert_eq!(Staking::stakers(&11).total, 1000 + total_payout_0 / 2); - assert_eq!(Staking::stakers(&21).total, 69 + total_payout_0 / 2); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69); let _11_balance = Balances::free_balance(&11); assert_eq!(_11_balance, 1000 + total_payout_0 / 2); - // -- slot stake should also be updated. - assert_eq!(Staking::slot_stake(), 69 + total_payout_0 / 2); + // Trigger another new era as the info are frozen before the era start. + start_era(2); + + // -- new infos + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000 + total_payout_0 / 2); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69 + total_payout_0 / 2); - check_exposure_all(); - check_nominator_all(); + check_exposure_all(Staking::active_era().unwrap().index); + check_nominator_all(Staking::active_era().unwrap().index); }); } @@ -1456,6 +1465,9 @@ fn on_free_balance_zero_stash_removes_validator() { // Check total balance of stash assert_eq!(Balances::total_balance(&11), 0); + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::NONE, 11)); + // Check storage items do not exist assert!(!>::contains_key(&10)); assert!(!>::contains_key(&11)); @@ -1509,6 +1521,9 @@ fn on_free_balance_zero_stash_removes_nominator() { // Check total balance of stash assert_eq!(Balances::total_balance(&11), 0); + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::NONE, 11)); + // Check storage items do not exist assert!(!>::contains_key(&10)); assert!(!>::contains_key(&11)); @@ -1523,8 +1538,6 @@ fn on_free_balance_zero_stash_removes_nominator() { fn switching_roles() { // Test that it should be possible to switch between roles (nominator, validator, idle) with minimal overhead. ExtBuilder::default().nominate(false).build().execute_with(|| { - Timestamp::set_timestamp(1); // Initialize time. 
- // Reset reward destination for i in &[10, 20] { assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); } @@ -1544,20 +1557,7 @@ fn switching_roles() { assert_ok!(Staking::bond(Origin::signed(5), 6, 1000, RewardDestination::Controller)); assert_ok!(Staking::validate(Origin::signed(6), ValidatorPrefs::default())); - // new block - start_session(1); - - // no change - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - - // new block - start_session(2); - - // no change - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - - // new block --> ne era --> new validators - start_session(3); + start_era(1); // with current nominators 10 and 5 have the most stake assert_eq_uvec!(validator_controllers(), vec![6, 10]); @@ -1571,18 +1571,12 @@ fn switching_roles() { // 2 : 2000 self vote + 250 vote. // Winners: 20 and 2 - start_session(4); - assert_eq_uvec!(validator_controllers(), vec![6, 10]); - - start_session(5); - assert_eq_uvec!(validator_controllers(), vec![6, 10]); + start_era(2); - // ne era - start_session(6); assert_eq_uvec!(validator_controllers(), vec![2, 20]); - check_exposure_all(); - check_nominator_all(); + check_exposure_all(Staking::active_era().unwrap().index); + check_nominator_all(Staking::active_era().unwrap().index); }); } @@ -1626,6 +1620,7 @@ fn bond_with_no_staked_value() { ); // bonded with absolute minimum value possible. assert_ok!(Staking::bond(Origin::signed(1), 2, 5, RewardDestination::Controller)); + let current_era_at_bond = Staking::current_era(); assert_eq!(Balances::locks(&1)[0].amount, 5); // unbonding even 1 will cause all to be unbonded. @@ -1636,7 +1631,8 @@ fn bond_with_no_staked_value() { stash: 1, active: 0, total: 5, - unlocking: vec![UnlockChunk {value: 5, era: 3}] + unlocking: vec![UnlockChunk {value: 5, era: 3}], + last_reward: current_era_at_bond, }) ); @@ -1658,7 +1654,7 @@ fn bond_with_no_staked_value() { } #[test] -fn bond_with_little_staked_value_bounded_by_slot_stake() { +fn bond_with_little_staked_value_bounded() { // Behavior when someone bonds with little staked value. // Particularly when she votes and the candidate is elected. ExtBuilder::default() @@ -1677,36 +1673,40 @@ fn bond_with_little_staked_value_bounded_by_slot_stake() { assert_ok!(Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller)); assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default())); + // reward era 0 let total_payout_0 = current_total_payout_for_duration(3000); assert!(total_payout_0 > 100); // Test is meaningful if reward something reward_all_elected(); start_era(1); + mock::make_all_reward_payment(0); // 2 is elected. - // and fucks up the slot stake. assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); - assert_eq!(Staking::slot_stake(), 1); + // And has minimal stake + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, 0); // Old ones are rewarded. assert_eq!(Balances::free_balance(10), init_balance_10 + total_payout_0 / 3); // no rewards paid to 2. This was initial election. 
assert_eq!(Balances::free_balance(2), init_balance_2); + // reward era 1 let total_payout_1 = current_total_payout_for_duration(3000); assert!(total_payout_1 > 100); // Test is meaningful if reward something reward_all_elected(); start_era(2); + mock::make_all_reward_payment(1); assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); - assert_eq!(Staking::slot_stake(), 1); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, 0); assert_eq!(Balances::free_balance(2), init_balance_2 + total_payout_1 / 3); assert_eq!( Balances::free_balance(&10), init_balance_10 + total_payout_0 / 3 + total_payout_1 / 3, ); - check_exposure_all(); - check_nominator_all(); + check_exposure_all(Staking::active_era().unwrap().index); + check_nominator_all(Staking::active_era().unwrap().index); }); } @@ -1726,8 +1726,8 @@ fn new_era_elects_correct_number_of_validators() { Session::on_initialize(System::block_number()); assert_eq!(validator_controllers().len(), 1); - check_exposure_all(); - check_nominator_all(); + check_exposure_all(Staking::active_era().unwrap().index); + check_nominator_all(Staking::active_era().unwrap().index); }) } @@ -1749,8 +1749,8 @@ fn phragmen_should_not_overflow_validators() { // This test will fail this. Will saturate. // check_exposure_all(); - assert_eq!(Staking::stakers(3).total, u64::max_value()); - assert_eq!(Staking::stakers(5).total, u64::max_value()); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 3).total, u64::max_value()); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 5).total, u64::max_value()); }) } @@ -1771,8 +1771,8 @@ fn phragmen_should_not_overflow_nominators() { assert_eq_uvec!(validator_controllers(), vec![4, 2]); // Saturate. - assert_eq!(Staking::stakers(3).total, u64::max_value()); - assert_eq!(Staking::stakers(5).total, u64::max_value()); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 3).total, u64::max_value()); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 5).total, u64::max_value()); }) } @@ -1790,8 +1790,8 @@ fn phragmen_should_not_overflow_ultimate() { assert_eq_uvec!(validator_controllers(), vec![4, 2]); // Saturate. - assert_eq!(Staking::stakers(3).total, u64::max_value()); - assert_eq!(Staking::stakers(5).total, u64::max_value()); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 3).total, u64::max_value()); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 5).total, u64::max_value()); }) } @@ -1806,10 +1806,19 @@ fn reward_validator_slashing_validator_doesnt_overflow() { // Set staker let _ = Balances::make_free_balance_be(&11, stake); - >::insert(&11, Exposure { total: stake, own: stake, others: vec![] }); + + let exposure = Exposure:: { total: stake, own: stake, others: vec![] }; + let reward = EraRewardPoints:: { + total: 1, + individual: vec![(11, 1)].into_iter().collect(), + }; // Check reward - let _ = Staking::reward_validator(&11, reward_slash); + ErasRewardPoints::::insert(0, reward); + ErasStakers::::insert(0, 11, &exposure); + ErasStakersClipped::::insert(0, 11, exposure); + ErasValidatorReward::::insert(0, stake); + assert_ok!(Staking::payout_validator(Origin::signed(10), 0)); assert_eq!(Balances::total_balance(&11), stake * 2); // Set staker @@ -1819,16 +1828,18 @@ fn reward_validator_slashing_validator_doesnt_overflow() { // only slashes out of bonded stake are applied. without this line, // it is 0. 
Staking::bond(Origin::signed(2), 20000, stake - 1, RewardDestination::default()).unwrap(); - >::insert(&11, Exposure { total: stake, own: 1, others: vec![ - IndividualExposure { who: 2, value: stake - 1 } - ]}); - + // Override exposure of 11 + ErasStakers::::insert(0, 11, Exposure { + total: stake, + own: 1, + others: vec![ IndividualExposure { who: 2, value: stake - 1 }] + }); // Check slashing on_offence_now( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], @@ -1849,45 +1860,49 @@ fn reward_from_authorship_event_handler_works() { >::note_author(11); >::note_uncle(21, 1); - // An uncle author that is not currently elected doesn't get rewards, - // but the block producer does get reward for referencing it. - >::note_uncle(31, 1); // Rewarding the same two times works. >::note_uncle(11, 1); // Not mandatory but must be coherent with rewards - assert_eq!(>::get(), vec![21, 11]); + assert_eq_uvec!(Session::validators(), vec![11, 21]); // 21 is rewarded as an uncle producer // 11 is rewarded as a block producer and uncle referencer and uncle producer - assert_eq!(CurrentEraPointsEarned::get().individual, vec![1, 20 + 2 * 3 + 1]); - assert_eq!(CurrentEraPointsEarned::get().total, 28); + assert_eq!( + ErasRewardPoints::::get(Staking::active_era().unwrap().index), + EraRewardPoints { + individual: vec![(11, 20 + 2 * 2 + 1), (21, 1)].into_iter().collect(), + total: 26, + }, + ); }) } #[test] fn add_reward_points_fns_works() { ExtBuilder::default().build().execute_with(|| { - let validators = >::current_elected(); // Not mandatory but must be coherent with rewards - assert_eq!(validators, vec![21, 11]); + assert_eq!(Session::validators(), vec![21, 11]); - >::reward_by_indices(vec![ - (0, 1), - (1, 1), - (2, 1), - (1, 1), + >::reward_by_ids(vec![ + (21, 1), + (11, 1), + (11, 1), ]); >::reward_by_ids(vec![ (21, 1), (11, 1), - (31, 1), (11, 1), ]); - assert_eq!(CurrentEraPointsEarned::get().individual, vec![2, 4]); - assert_eq!(CurrentEraPointsEarned::get().total, 6); + assert_eq!( + ErasRewardPoints::::get(Staking::active_era().unwrap().index), + EraRewardPoints { + individual: vec![(11, 4), (21, 2)].into_iter().collect(), + total: 6, + }, + ); }) } @@ -1910,19 +1925,20 @@ fn era_is_always_same_length() { // session changes. 
ExtBuilder::default().build().execute_with(|| { start_era(1); - assert_eq!(Staking::current_era_start_session_index(), SessionsPerEra::get()); + assert_eq!(Staking::eras_start_session_index(Staking::active_era().unwrap().index).unwrap(), SessionsPerEra::get()); start_era(2); - assert_eq!(Staking::current_era_start_session_index(), SessionsPerEra::get() * 2); + assert_eq!(Staking::eras_start_session_index(Staking::active_era().unwrap().index).unwrap(), SessionsPerEra::get() * 2); let session = Session::current_index(); ForceEra::put(Forcing::ForceNew); advance_session(); - assert_eq!(Staking::current_era(), 3); - assert_eq!(Staking::current_era_start_session_index(), session + 1); + advance_session(); + assert_eq!(Staking::active_era().unwrap().index, 3); + assert_eq!(Staking::eras_start_session_index(Staking::active_era().unwrap().index).unwrap(), session + 2); start_era(4); - assert_eq!(Staking::current_era_start_session_index(), session + SessionsPerEra::get() + 1); + assert_eq!(Staking::eras_start_session_index(Staking::active_era().unwrap().index).unwrap(), session + 2 + SessionsPerEra::get()); }); } @@ -1933,7 +1949,7 @@ fn offence_forces_new_era() { &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![], }], @@ -1953,7 +1969,7 @@ fn offence_ensures_new_era_without_clobbering() { &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![], }], @@ -1967,12 +1983,13 @@ fn offence_ensures_new_era_without_clobbering() { #[test] fn offence_deselects_validator_when_slash_is_zero() { ExtBuilder::default().build().execute_with(|| { + assert!(Session::validators().contains(&11)); assert!(>::contains_key(11)); on_offence_now( &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![], }], @@ -1980,6 +1997,9 @@ fn offence_deselects_validator_when_slash_is_zero() { ); assert_eq!(Staking::force_era(), Forcing::ForceNew); assert!(!>::contains_key(11)); + start_era(1); + assert!(!Session::validators().contains(&11)); + assert!(!>::contains_key(11)); }); } @@ -1988,7 +2008,7 @@ fn slashing_performed_according_exposure() { // This test checks that slashing is performed according the exposure (or more precisely, // historical exposure), not the current balance. ExtBuilder::default().build().execute_with(|| { - assert_eq!(Staking::stakers(&11).own, 1000); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).own, 1000); // Handle an offence with a historical exposure. 
on_offence_now( @@ -2017,11 +2037,12 @@ fn slash_in_old_span_does_not_deselect() { start_era(1); assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); on_offence_now( &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![], }], @@ -2035,6 +2056,7 @@ fn slash_in_old_span_does_not_deselect() { Staking::validate(Origin::signed(10), Default::default()).unwrap(); assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); + assert!(!Session::validators().contains(&11)); start_era(3); @@ -2045,7 +2067,7 @@ fn slash_in_old_span_does_not_deselect() { &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![], }], @@ -2056,12 +2078,13 @@ fn slash_in_old_span_does_not_deselect() { // not for zero-slash. assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); on_offence_in_era( &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![], }], @@ -2073,6 +2096,7 @@ fn slash_in_old_span_does_not_deselect() { // or non-zero. assert_eq!(Staking::force_era(), Forcing::NotForcing); assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); assert_ledger_consistent(11); }); } @@ -2085,13 +2109,13 @@ fn reporters_receive_their_slice() { // The reporters' reward is calculated from the total exposure. let initial_balance = 1125; - assert_eq!(Staking::stakers(&11).total, initial_balance); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, initial_balance); on_offence_now( &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![1, 2], }], @@ -2116,13 +2140,13 @@ fn subsequent_reports_in_same_span_pay_out_less() { // The reporters' reward is calculated from the total exposure. 
let initial_balance = 1125; - assert_eq!(Staking::stakers(&11).total, initial_balance); + assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, initial_balance); on_offence_now( &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![1], }], @@ -2138,7 +2162,7 @@ fn subsequent_reports_in_same_span_pay_out_less() { &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![1], }], @@ -2162,7 +2186,7 @@ fn invulnerables_are_not_slashed() { assert_eq!(Balances::free_balance(11), 1000); assert_eq!(Balances::free_balance(21), 2000); - let exposure = Staking::stakers(&21); + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 21); let initial_balance = Staking::slashable_balance_of(&21); let nominator_balances: Vec<_> = exposure.others @@ -2171,11 +2195,11 @@ fn invulnerables_are_not_slashed() { on_offence_now( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, OffenceDetails { - offender: (21, Staking::stakers(&21)), + offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), reporters: vec![], }, ], @@ -2209,7 +2233,7 @@ fn dont_slash_if_fraction_is_zero() { &[OffenceDetails { offender: ( 11, - Staking::stakers(&11), + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), ), reporters: vec![], }], @@ -2230,7 +2254,7 @@ fn only_slash_for_max_in_era() { on_offence_now( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], @@ -2244,7 +2268,7 @@ fn only_slash_for_max_in_era() { on_offence_now( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], @@ -2257,7 +2281,7 @@ fn only_slash_for_max_in_era() { on_offence_now( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], @@ -2278,7 +2302,7 @@ fn garbage_collection_after_slashing() { on_offence_now( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], @@ -2292,7 +2316,7 @@ fn garbage_collection_after_slashing() { on_offence_now( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], @@ -2303,6 +2327,10 @@ fn garbage_collection_after_slashing() { // so we don't test those here. 
assert_eq!(Balances::free_balance(11), 0); + assert_eq!(Balances::total_balance(&11), 0); + + assert_ok!(Staking::reap_stash(Origin::NONE, 11)); + assert!(::SlashingSpans::get(&11).is_none()); assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &0); }) @@ -2315,21 +2343,21 @@ fn garbage_collection_on_window_pruning() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::stakers(&11); + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], &[Perbill::from_percent(10)], ); - let now = Staking::current_era(); + let now = Staking::active_era().unwrap().index; assert_eq!(Balances::free_balance(11), 900); assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); @@ -2363,8 +2391,8 @@ fn slashing_nominators_by_span_max() { assert_eq!(Staking::slashable_balance_of(&21), 1000); - let exposure_11 = Staking::stakers(&11); - let exposure_21 = Staking::stakers(&21); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, 21); assert_eq!(Balances::free_balance(101), 2000); let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value; let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value; @@ -2372,7 +2400,7 @@ fn slashing_nominators_by_span_max() { on_offence_in_era( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], @@ -2406,7 +2434,7 @@ fn slashing_nominators_by_span_max() { on_offence_in_era( &[ OffenceDetails { - offender: (21, Staking::stakers(&21)), + offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), reporters: vec![], }, ], @@ -2429,7 +2457,7 @@ fn slashing_nominators_by_span_max() { on_offence_in_era( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], @@ -2465,7 +2493,7 @@ fn slashes_are_summed_across_spans() { on_offence_now( &[ OffenceDetails { - offender: (21, Staking::stakers(&21)), + offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), reporters: vec![], }, ], @@ -2490,7 +2518,7 @@ fn slashes_are_summed_across_spans() { on_offence_now( &[ OffenceDetails { - offender: (21, Staking::stakers(&21)), + offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), reporters: vec![], }, ], @@ -2515,14 +2543,14 @@ fn deferred_slashes_are_deferred() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::stakers(&11); + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; on_offence_now( &[ OffenceDetails { - offender: (11, Staking::stakers(&11)), + offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), reporters: vec![], }, ], @@ -2558,7 +2586,7 @@ fn remove_deferred() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = 
Staking::stakers(&11); + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; @@ -2628,7 +2656,7 @@ fn remove_multi_deferred() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::stakers(&11); + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); assert_eq!(Balances::free_balance(101), 2000); on_offence_now( @@ -2644,7 +2672,7 @@ fn remove_multi_deferred() { on_offence_now( &[ OffenceDetails { - offender: (21, Staking::stakers(&21)), + offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), reporters: vec![], } ], @@ -2677,7 +2705,7 @@ fn slash_kicks_validators_not_nominators() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::stakers(&11); + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); assert_eq!(Balances::free_balance(101), 2000); let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; @@ -2709,6 +2737,108 @@ fn slash_kicks_validators_not_nominators() { }); } +#[test] +fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { + // should check that: + // * rewards get paid until history_depth for both validators and nominators + // * an invalid era to claim doesn't update last_reward + // * double claim of one era fails + ExtBuilder::default().nominate(true).build().execute_with(|| { + let init_balance_10 = Balances::total_balance(&10); + let init_balance_100 = Balances::total_balance(&100); + + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_100 = Perbill::from_rational_approximation::(125, 1125); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_era(1); + + >::reward_by_ids(vec![(11, 1)]); + // Change total issuance in order to modify total payout + let _ = Balances::deposit_creating(&999, 1_000_000_000); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3000); + assert!(total_payout_1 > 10); // Test is meaningful if reward something + assert!(total_payout_1 != total_payout_0); + + start_era(2); + + >::reward_by_ids(vec![(11, 1)]); + // Change total issuance in order to modify total payout + let _ = Balances::deposit_creating(&999, 1_000_000_000); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_2 = current_total_payout_for_duration(3000); + assert!(total_payout_2 > 10); // Test is meaningful if reward something + assert!(total_payout_2 != total_payout_0); + assert!(total_payout_2 != total_payout_1); + + start_era(Staking::history_depth() + 1); + + let active_era = Staking::active_era().unwrap().index; + + // This is the latest planned era in staking, not the active era + let current_era = Staking::current_era().unwrap(); + + // Last kept is 1: + assert!(current_era - Staking::history_depth() == 1); + assert_noop!( + Staking::payout_validator(Origin::signed(10), 0), + // Fail: Era out of history + Error::::InvalidEraToReward + ); + 
assert_ok!(Staking::payout_validator(Origin::signed(10), 1)); + assert_ok!(Staking::payout_validator(Origin::signed(10), 2)); + assert_noop!( + Staking::payout_validator(Origin::signed(10), 2), + // Fail: Double claim + Error::::InvalidEraToReward + ); + assert_noop!( + Staking::payout_validator(Origin::signed(10), active_era), + // Fail: Era not finished yet + Error::::InvalidEraToReward + ); + + assert_noop!( + Staking::payout_nominator(Origin::signed(100), 0, vec![(11, 0)]), + // Fail: Era out of history + Error::::InvalidEraToReward + ); + assert_ok!(Staking::payout_nominator(Origin::signed(100), 1, vec![(11, 0)])); + assert_ok!(Staking::payout_nominator(Origin::signed(100), 2, vec![(11, 0)])); + assert_noop!( + Staking::payout_nominator(Origin::signed(100), 2, vec![(11, 0)]), + // Fail: Double claim + Error::::InvalidEraToReward + ); + assert_noop!( + Staking::payout_nominator(Origin::signed(100), active_era, vec![(11, 0)]), + // Fail: Era not finished yet + Error::::InvalidEraToReward + ); + + // Era 0 can't be rewarded anymore and current era can't be rewarded yet + // only era 1 and 2 can be rewarded. + + assert_eq!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * (total_payout_1 + total_payout_2), + ); + assert_eq!( + Balances::total_balance(&100), + init_balance_100 + part_for_100 * (total_payout_1 + total_payout_2), + ); + }); +} + #[test] fn zero_slash_keeps_nominators() { ExtBuilder::default().build().execute_with(|| { @@ -2716,7 +2846,7 @@ fn zero_slash_keeps_nominators() { assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::stakers(&11); + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); assert_eq!(Balances::free_balance(101), 2000); on_offence_now( @@ -2746,3 +2876,122 @@ fn zero_slash_keeps_nominators() { assert!(nominations.submitted_in >= last_slash); }); } + +#[test] +fn six_session_delay() { + ExtBuilder::default().build().execute_with(|| { + use pallet_session::SessionManager; + + let val_set = Session::validators(); + let init_session = Session::current_index(); + let init_active_era = Staking::active_era().unwrap().index; + // pallet-session is delaying session by one, thus the next session to plan is +2. + assert_eq!(>::new_session(init_session + 2), None); + assert_eq!(>::new_session(init_session + 3), Some(val_set.clone())); + assert_eq!(>::new_session(init_session + 4), None); + assert_eq!(>::new_session(init_session + 5), None); + assert_eq!(>::new_session(init_session + 6), Some(val_set.clone())); + + >::end_session(init_session); + >::start_session(init_session + 1); + assert_eq!(Staking::active_era().unwrap().index, init_active_era); + >::end_session(init_session + 1); + >::start_session(init_session + 2); + assert_eq!(Staking::active_era().unwrap().index, init_active_era); + + // Reward current era + Staking::reward_by_ids(vec![(11, 1)]); + + // New active era is triggered here. + >::end_session(init_session + 2); + >::start_session(init_session + 3); + assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + >::end_session(init_session + 3); + >::start_session(init_session + 4); + assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + >::end_session(init_session + 4); + >::start_session(init_session + 5); + assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + + // Reward current era + Staking::reward_by_ids(vec![(21, 2)]); + + // New active era is triggered here. 
+ >::end_session(init_session + 5); + >::start_session(init_session + 6); + assert_eq!(Staking::active_era().unwrap().index, init_active_era + 2); + + // That reward are correct + assert_eq!(Staking::eras_reward_points(init_active_era).total, 1); + assert_eq!(Staking::eras_reward_points(init_active_era + 1).total, 2); + }); +} + +#[test] +fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward() { + // Test: + // * If nominator nomination is below the $MaxNominatorRewardedPerValidator other nominator + // then the nominator can't claim its reward + // * A nominator can't claim another nominator reward + ExtBuilder::default().build().execute_with(|| { + for i in 0..=::MaxNominatorRewardedPerValidator::get() { + let stash = 10_000 + i as u64; + let controller = 20_000 + i as u64; + let balance = 10_000 + i as u64; + Balances::make_free_balance_be(&stash, balance); + assert_ok!( + Staking::bond( + Origin::signed(stash), + controller, + balance, + RewardDestination::Stash + ) + ); + assert_ok!(Staking::nominate(Origin::signed(controller), vec![11])); + } + mock::start_era(1); + + >::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + + mock::start_era(2); + mock::make_all_reward_payment(1); + + // nominator 10_000 can't get its reward because exposure is clipped. However it will try + // to query other people reward. + assert_ok!(Staking::payout_nominator(Origin::signed(20_000), 1, vec![(11, 0)])); + + // Assert only nominators from 1 to Max are rewarded + for i in 0..=::MaxNominatorRewardedPerValidator::get() { + let stash = 10_000 + i as u64; + let balance = 10_000 + i as u64; + if stash == 10_000 { + assert!(Balances::free_balance(&stash) == balance); + } else { + assert!(Balances::free_balance(&stash) > balance); + } + } + }); +} + +#[test] +fn set_history_depth_works() { + ExtBuilder::default().build().execute_with(|| { + start_era(10); + Staking::set_history_depth(Origin::ROOT, 20).unwrap(); + assert!(::ErasTotalStake::contains_key(10 - 4)); + assert!(::ErasTotalStake::contains_key(10 - 5)); + Staking::set_history_depth(Origin::ROOT, 4).unwrap(); + assert!(::ErasTotalStake::contains_key(10 - 4)); + assert!(!::ErasTotalStake::contains_key(10 - 5)); + Staking::set_history_depth(Origin::ROOT, 3).unwrap(); + assert!(!::ErasTotalStake::contains_key(10 - 4)); + assert!(!::ErasTotalStake::contains_key(10 - 5)); + Staking::set_history_depth(Origin::ROOT, 8).unwrap(); + assert!(!::ErasTotalStake::contains_key(10 - 4)); + assert!(!::ErasTotalStake::contains_key(10 - 5)); + }); +} + diff --git a/frame/sudo/Cargo.toml b/frame/sudo/Cargo.toml index ac91129c574ad41c3e11ad772979a7c8b880e19d..8d645df85cb9457934099f2f05029b64ecee4e30 100644 --- a/frame/sudo/Cargo.toml +++ b/frame/sudo/Cargo.toml @@ -1,21 +1,24 @@ [package] name = "pallet-sudo" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for sudo" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", 
default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/support/Cargo.toml b/frame/support/Cargo.toml index b20f5c73bfd99911aa74467ac055fecea1fcacb2..78a7642d9f107cc576f95eb4e2a644e29c78c60c 100644 --- a/frame/support/Cargo.toml +++ b/frame/support/Cargo.toml @@ -1,32 +1,35 @@ [package] name = "frame-support" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Support code for the runtime." [dependencies] log = "0.4" serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false, features = ["derive"] } -frame-metadata = { version = "11.0.0", default-features = false, path = "../metadata" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io ={ path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-arithmetic = { version = "2.0.0", default-features = false, path = "../../primitives/arithmetic" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -frame-support-procedural = { version = "2.0.0", path = "./procedural" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +frame-metadata = { version = "11.0.0-alpha.2", default-features = false, path = "../metadata" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io ={ path = "../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-arithmetic = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/arithmetic" } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +frame-support-procedural = { version = "2.0.0-alpha.2", path = "./procedural" } paste = "0.1.6" once_cell = { version = "1", default-features = false, optional = true } 
-sp-state-machine = { version = "0.8", optional = true, path = "../../primitives/state-machine" } +sp-state-machine = { version = "0.8.0-alpha.2", optional = true, path = "../../primitives/state-machine" } bitmask = { version = "0.5.0", default-features = false } impl-trait-for-tuples = "0.1.3" tracing = { version = "0.1.10", optional = true } [dev-dependencies] pretty_assertions = "0.6.1" -frame-system = { version = "2.0.0", path = "../system" } +frame-system = { version = "2.0.0-alpha.2", path = "../system" } [features] default = ["std"] diff --git a/frame/support/procedural/Cargo.toml b/frame/support/procedural/Cargo.toml index 06a84750b58a2126414cef31b1f7eb527b43bc2d..8d8ecb18a761b94c22d16267531cb328cf7b7a35 100644 --- a/frame/support/procedural/Cargo.toml +++ b/frame/support/procedural/Cargo.toml @@ -1,15 +1,18 @@ [package] name = "frame-support-procedural" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Proc macro of Support code for the runtime." [lib] proc-macro = true [dependencies] -frame-support-procedural-tools = { version = "2.0.0", path = "./tools" } +frame-support-procedural-tools = { version = "2.0.0-alpha.2", path = "./tools" } proc-macro2 = "1.0.6" -quote = "1.0.2" +quote = "1.0.3" syn = { version = "1.0.7", features = ["full"] } diff --git a/frame/support/procedural/tools/Cargo.toml b/frame/support/procedural/tools/Cargo.toml index 4a1301e71261b63de2b7f002f135a8678a7f14fe..52773f6fbeea5a235c1b816494057774a0045dfa 100644 --- a/frame/support/procedural/tools/Cargo.toml +++ b/frame/support/procedural/tools/Cargo.toml @@ -1,13 +1,16 @@ [package] name = "frame-support-procedural-tools" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Proc macro helpers for procedural macros" [dependencies] -frame-support-procedural-tools-derive = { version = "2.0.0", path = "./derive" } +frame-support-procedural-tools-derive = { version = "2.0.0-alpha.2", path = "./derive" } proc-macro2 = "1.0.6" -quote = "1.0.2" +quote = "1.0.3" syn = { version = "1.0.7", features = ["full", "visit"] } proc-macro-crate = "0.1.4" diff --git a/frame/support/procedural/tools/derive/Cargo.toml b/frame/support/procedural/tools/derive/Cargo.toml index e65a7e0e5ff7b97b06d5a47ececbe8f8bbc5049e..6bed290c7dede02e1e12827d87a2d254d5360013 100644 --- a/frame/support/procedural/tools/derive/Cargo.toml +++ b/frame/support/procedural/tools/derive/Cargo.toml @@ -1,14 +1,17 @@ [package] name = "frame-support-procedural-tools-derive" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Use to derive parsing for parsing struct." 
[lib] proc-macro = true [dependencies] proc-macro2 = "1.0.6" -quote = { version = "1.0.2", features = ["proc-macro"] } +quote = { version = "1.0.3", features = ["proc-macro"] } syn = { version = "1.0.7", features = ["proc-macro" ,"full", "extra-traits", "parsing"] } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 084ea285af4649383b0773af451f8cbaef56d204..a9c48097ad64ab6ecb28a10c7f7c66dd7b30cf18 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -196,6 +196,10 @@ impl Parameter for T where T: Codec + EncodeLike + Clone + Eq + fmt::Debug {} /// /// The following reserved functions also take the block number (with type `T::BlockNumber`) as an optional input: /// +/// * `on_runtime_upgrade`: Executes at the beginning of a block prior to on_initialize when there +/// is a runtime upgrade. This allows each module to upgrade its storage before the storage items are used. +/// As such, **calling other modules must be avoided**!! Using this function will implement the +/// [`OnRuntimeUpgrade`](../sp_runtime/traits/trait.OnRuntimeUpgrade.html) trait. /// * `on_initialize`: Executes at the beginning of a block. Using this function will /// implement the [`OnInitialize`](../sp_runtime/traits/trait.OnInitialize.html) trait. /// * `on_finalize`: Executes at the end of a block. Using this function will @@ -229,6 +233,7 @@ macro_rules! decl_module { {} {} {} + {} [] $($t)* ); @@ -261,6 +266,7 @@ macro_rules! decl_module { {} {} {} + {} [] $($t)* ); @@ -274,6 +280,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } {} { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -290,6 +297,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $vis fn deposit_event() = default; } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } @@ -305,6 +313,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } {} { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -326,6 +335,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } {} { $( $offchain:tt )* } { $( $constants:tt )* } @@ -342,6 +352,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { #[weight = $crate::dispatch::SimpleDispatchInfo::zero()] fn on_finalize( $( $param_name : $param ),* ) { $( $impl )* } @@ -361,6 +372,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } {} { $( $offchain:tt )* } { $( $constants:tt )* } @@ -378,6 +390,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { #[weight = $weight] fn on_finalize( $( $param_name : $param ),* ) { $( $impl )* } @@ -389,6 +402,85 @@ macro_rules! decl_module { $($rest)* ); }; + // Add on_runtime_upgrade, without a given weight. + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? 
+ > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + { $( $on_initialize:tt )* } + {} + { $( $on_finalize:tt )* } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } + $($rest:tt)* + ) => { + $crate::decl_module!(@normalize + $(#[$attr])* + pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> + for enum $call_type where origin: $origin_type, system = $system + { $( $other_where_bounds )* } + { $( $deposit_event )* } + { $( $on_initialize )* } + { + #[weight = $crate::dispatch::SimpleDispatchInfo::zero()] + fn on_runtime_upgrade( $( $param_name : $param ),* ) { $( $impl )* } + } + { $( $on_finalize )* } + { $( $offchain )* } + { $( $constants )* } + { $( $error_type )* } + [ $( $dispatchables )* ] + $($rest)* + ); + }; + // Add on_runtime_upgrade, given weight. + (@normalize + $(#[$attr:meta])* + pub struct $mod_type:ident< + $trait_instance:ident: $trait_name:ident$(, I: $instantiable:path $(= $module_default_instance:path)?)? + > + for enum $call_type:ident where origin: $origin_type:ty, system = $system:ident + { $( $other_where_bounds:tt )* } + { $( $deposit_event:tt )* } + { $( $on_initialize:tt )* } + {} + { $( $on_finalize:tt )* } + { $( $offchain:tt )* } + { $( $constants:tt )* } + { $( $error_type:tt )* } + [ $( $dispatchables:tt )* ] + $(#[doc = $doc_attr:tt])* + #[weight = $weight:expr] + fn on_runtime_upgrade( $( $param_name:ident : $param:ty ),* $(,)? ) { $( $impl:tt )* } + $($rest:tt)* + ) => { + $crate::decl_module!(@normalize + $(#[$attr])* + pub struct $mod_type<$trait_instance: $trait_name$(, I: $instantiable $(= $module_default_instance)?)?> + for enum $call_type where origin: $origin_type, system = $system + { $( $other_where_bounds )* } + { $( $deposit_event )* } + { $( $on_initialize )* } + { + #[weight = $weight] + fn on_runtime_upgrade( $( $param_name : $param ),* ) { $( $impl )* } + } + { $( $on_finalize )* } + { $( $offchain )* } + { $( $constants )* } + { $( $error_type )* } + [ $( $dispatchables )* ] + $($rest)* + ); + }; // Add on_initialize, without a given weight. (@normalize $(#[$attr:meta])* @@ -399,6 +491,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } {} + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -418,6 +511,7 @@ macro_rules! decl_module { #[weight = $crate::dispatch::SimpleDispatchInfo::zero()] fn on_initialize( $( $param_name : $param ),* ) { $( $impl )* } } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } @@ -436,6 +530,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } {} + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -456,6 +551,7 @@ macro_rules! decl_module { #[weight = $weight] fn on_initialize( $( $param_name : $param ),* ) { $( $impl )* } } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } @@ -474,6 +570,7 @@ macro_rules! 
decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { } { $( $constants:tt )* } @@ -492,6 +589,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { fn offchain_worker( $( $param_name : $param ),* ) { $( $impl )* } } { $( $constants )* } @@ -512,6 +610,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -531,6 +630,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { $( $offchain )* } { @@ -555,6 +655,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -573,6 +674,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } @@ -592,6 +694,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -608,6 +711,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } @@ -628,6 +732,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -649,6 +754,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } @@ -676,6 +782,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -696,6 +803,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } @@ -717,6 +825,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -742,6 +851,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -767,6 +877,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -793,6 +904,7 @@ macro_rules! 
decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -808,6 +920,7 @@ macro_rules! decl_module { { $( $other_where_bounds )* } { $( $deposit_event )* } { $( $on_initialize )* } + { $( $on_runtime_upgrade )* } { $( $on_finalize )* } { $( $offchain )* } { $( $constants )* } @@ -903,6 +1016,39 @@ macro_rules! decl_module { {} }; + (@impl_on_runtime_upgrade + $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; + { $( $other_where_bounds:tt )* } + #[weight = $weight:expr] + fn on_runtime_upgrade() { $( $impl:tt )* } + ) => { + impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> + $crate::sp_runtime::traits::OnRuntimeUpgrade + for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* + { + fn on_runtime_upgrade() { + use $crate::sp_std::if_std; + if_std! { + use $crate::tracing; + let span = tracing::span!(tracing::Level::DEBUG, "on_runtime_upgrade"); + let _enter = span.enter(); + } + { $( $impl )* } + } + } + }; + + (@impl_on_runtime_upgrade + $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; + { $( $other_where_bounds:tt )* } + ) => { + impl<$trait_instance: $trait_name$(, $instance: $instantiable)?> + $crate::sp_runtime::traits::OnRuntimeUpgrade + for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* + {} + }; + + (@impl_on_finalize $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } @@ -961,6 +1107,10 @@ macro_rules! decl_module { (@impl_block_hooks_weight $module:ident<$trait_instance:ident: $trait_name:ident$(, $instance:ident: $instantiable:path)?>; { $( $other_where_bounds:tt )* } + @runtime_upgrade $( + #[weight = $weight_runtime_update:expr] + fn on_runtime_upgrade($( $param_runtime_upgrade:ident : $param_ty_runtime_upgrade:ty )*) { $( $impl_runtime_upgrade:tt )* } + )? @init $( #[weight = $weight_initialize:expr] fn on_initialize($( $param_initialize:ident : $param_ty_initialize:ty )*) { $( $impl_initialize:tt )* } @@ -974,6 +1124,11 @@ macro_rules! decl_module { $crate::dispatch::WeighBlock<$trait_instance::BlockNumber> for $module<$trait_instance$(, $instance)?> where $( $other_where_bounds )* { + $( + fn on_runtime_upgrade() -> $crate::dispatch::Weight { + >::weigh_data(&$weight_initialize, ()) + } + )? $( fn on_initialize(n: $trait_instance::BlockNumber) -> $crate::dispatch::Weight { >::weigh_data(&$weight_initialize, n) @@ -1208,6 +1363,7 @@ macro_rules! decl_module { { $( $other_where_bounds:tt )* } { $( $deposit_event:tt )* } { $( $on_initialize:tt )* } + { $( $on_runtime_upgrade:tt )* } { $( $on_finalize:tt )* } { $( $offchain:tt )* } { $( $constants:tt )* } @@ -1231,6 +1387,14 @@ macro_rules! decl_module { $( $on_initialize )* } + $crate::decl_module! { + @impl_on_runtime_upgrade + $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; + { $( $other_where_bounds )* } + $( $on_runtime_upgrade )* + } + + $crate::decl_module! { @impl_on_finalize $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; @@ -1242,6 +1406,7 @@ macro_rules! 
decl_module { @impl_block_hooks_weight $mod_type<$trait_instance: $trait_name $(, $instance: $instantiable)?>; { $( $other_where_bounds )* } + @runtime_upgrade $( $on_runtime_upgrade )* @init $( $on_initialize )* @fin $( $on_finalize )* } @@ -1869,15 +2034,12 @@ macro_rules! __check_reserved_fn_name { (on_initialize $( $rest:ident )*) => { $crate::__check_reserved_fn_name!(@compile_error on_initialize); }; - (on_initialise $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!(@compile_error_renamed on_initialise on_initialize); + (on_runtime_upgrade $( $rest:ident )*) => { + $crate::__check_reserved_fn_name!(@compile_error on_runtime_upgrade); }; (on_finalize $( $rest:ident )*) => { $crate::__check_reserved_fn_name!(@compile_error on_finalize); }; - (on_finalise $( $rest:ident )*) => { - $crate::__check_reserved_fn_name!(@compile_error_renamed on_finalise on_finalize); - }; (offchain_worker $( $rest:ident )*) => { $crate::__check_reserved_fn_name!(@compile_error offchain_worker); }; @@ -1914,7 +2076,7 @@ macro_rules! __check_reserved_fn_name { #[allow(dead_code)] mod tests { use super::*; - use crate::sp_runtime::traits::{OnInitialize, OnFinalize}; + use crate::sp_runtime::traits::{OnInitialize, OnFinalize, OnRuntimeUpgrade}; use crate::weights::{DispatchInfo, DispatchClass}; use crate::traits::{CallMetadata, GetCallMetadata, GetCallName}; @@ -1936,8 +2098,8 @@ mod tests { } } - struct BLockWeight; - impl> WeighData for BLockWeight { + struct BlockWeight; + impl> WeighData for BlockWeight { fn weigh_data(&self, target: BlockNumber) -> Weight { let target: u32 = target.into(); if target % 2 == 0 { 10 } else { 0 } @@ -1957,8 +2119,10 @@ mod tests { #[weight = SimpleDispatchInfo::FixedNormal(7)] fn on_initialize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_initialize") } } - #[weight = BLockWeight] + #[weight = BlockWeight] fn on_finalize(n: T::BlockNumber) { if n.into() == 42 { panic!("on_finalize") } } + #[weight = SimpleDispatchInfo::FixedOperational(69)] + fn on_runtime_upgrade() { } fn offchain_worker() {} #[weight = SimpleDispatchInfo::FixedOperational(5)] @@ -2100,6 +2264,11 @@ mod tests { as OnFinalize>::on_finalize(42); } + #[test] + fn on_runtime_upgrade_should_work() { + as OnRuntimeUpgrade>::on_runtime_upgrade(); + } + #[test] fn weight_should_attach_to_call_enum() { // operational. diff --git a/frame/support/src/storage/generator/value.rs b/frame/support/src/storage/generator/value.rs index 4083576e298135df123052ea80ffda39ba8a6af6..9e26131f48949637be4a8779169e97bddc9396a0 100644 --- a/frame/support/src/storage/generator/value.rs +++ b/frame/support/src/storage/generator/value.rs @@ -91,6 +91,14 @@ impl> storage::StorageValue for G { unhashed::put(&Self::storage_value_final_key(), &val) } + fn set(maybe_val: Self::Query) { + if let Some(val) = G::from_query_to_optional_value(maybe_val) { + unhashed::put(&Self::storage_value_final_key(), &val) + } else { + unhashed::kill(&Self::storage_value_final_key()) + } + } + fn kill() { unhashed::kill(&Self::storage_value_final_key()) } diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index e8d58b46d4c4f0e18a693ce50317bc0c89b65422..291dceb4ea111fd90e0ecaf3b5dd6737a1f8f900 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -70,6 +70,11 @@ impl Iterator for StorageIterator { } } +/// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. 
+pub fn have_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> bool { + get_storage_value::<()>(module, item, hash).is_some() +} + /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn get_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> Option { let mut key = vec![0u8; 32 + hash.len()]; diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 4bca5ea402398c349642bb103ae30f89aab27c74..e5d845cb22a957a7a3d1f7f68efbdf7550c9aa55 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -65,14 +65,18 @@ pub trait StorageValue { /// /// # Usage /// - /// This would typically be called inside the module implementation of on_initialize, while - /// ensuring **no usage of this storage are made before the call to `on_initialize`**. (More + /// This would typically be called inside the module implementation of on_runtime_upgrade, while + /// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More /// precisely prior initialized modules doesn't make use of this storage). fn translate) -> Option>(f: F) -> Result, ()>; /// Store a value under this key into the provided storage instance. fn put>(val: Arg); + /// Store a value under this key into the provided storage instance; this uses the query + /// type rather than the underlying value. + fn set(val: Self::Query); + /// Mutate the value fn mutate R>(f: F) -> R; @@ -265,8 +269,8 @@ pub trait StorageLinkedMap { /// /// # Usage /// - /// This would typically be called inside the module implementation of on_initialize, while - /// ensuring **no usage of this storage are made before the call to `on_initialize`**. (More + /// This would typically be called inside the module implementation of on_runtime_upgrade, while + /// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More /// precisely prior initialized modules doesn't make use of this storage). fn translate(translate_key: TK, translate_val: TV) -> Result<(), Option> where K2: FullCodec + Clone, V2: Decode, TK: Fn(K2) -> K, TV: Fn(V2) -> V; @@ -460,8 +464,8 @@ pub trait StoragePrefixedMap { /// /// # Usage /// - /// This would typically be called inside the module implementation of on_initialize, while - /// ensuring **no usage of this storage are made before the call to `on_initialize`**. (More + /// This would typically be called inside the module implementation of on_runtime_upgrade, while + /// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More /// precisely prior initialized modules doesn't make use of this storage). fn translate_values(translate_val: TV) -> Result<(), u32> where OldValue: Decode, TV: Fn(OldValue) -> Value diff --git a/frame/support/src/storage/unhashed.rs b/frame/support/src/storage/unhashed.rs index 54910e99a4d864407f53c1edeedaefa52ace40e8..1ecf46ef1864708b53e7e19073768196314be5b4 100644 --- a/frame/support/src/storage/unhashed.rs +++ b/frame/support/src/storage/unhashed.rs @@ -101,6 +101,10 @@ pub fn get_raw(key: &[u8]) -> Option> { } /// Put a raw byte slice into storage. +/// +/// **WARNING**: If you set the storage of the Substrate Wasm (`well_known_keys::CODE`), +/// you should also call `frame_system::RuntimeUpgraded::put(true)` to trigger the +/// `on_runtime_upgrade` logic. 
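As a reading aid for the pieces introduced above, here is a minimal, hypothetical pallet sketch showing how they are meant to fit together: `on_runtime_upgrade` runs the migration, `have_storage_value` guards it, and `StorageValue::translate` re-encodes the old layout. The `Example` pallet, the `Foo` item and its old/new value types are illustrative assumptions, not part of this patch.

```rust
// Hypothetical sketch: the `Example` pallet and the `Foo` item are illustrative.
// Only the hook (`on_runtime_upgrade`), `StorageValue::translate` and
// `have_storage_value` come from this patch.
use frame_support::{decl_module, decl_storage};
use frame_support::storage::{StorageValue, migration::have_storage_value};
use frame_system as system;

pub trait Trait: system::Trait {}

decl_storage! {
	trait Store for Module<T: Trait> as Example {
		// Stored as `u32` by the previous runtime; widened to `u64` from now on.
		Foo: u64;
	}
}

decl_module! {
	pub struct Module<T: Trait> for enum Call where origin: T::Origin {
		// Runs once at the start of the first block built with the new runtime,
		// before `on_initialize`, so the storage is migrated before it is read.
		fn on_runtime_upgrade() {
			// Skip the work entirely if the item was never written.
			if have_storage_value(b"Example", b"Foo", &[]) {
				// Decode the old `u32` layout and re-encode it as `u64`.
				let _ = Foo::translate(|old: Option<u32>| old.map(u64::from));
			}
		}
	}
}
```

Because the hook runs before any `on_initialize`, it must not call into other modules, as the `decl_module!` documentation added in this patch stresses.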
pub fn put_raw(key: &[u8], value: &[u8]) { sp_io::storage::set(key, value) } diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 570342bf578e01064fb9ff85b1c87768fc3bfd8d..c1e9e7c317159b2b4eaeee51ade9762a7c311389 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -195,9 +195,9 @@ pub trait OnNewAccount { /// The account with the given id was reaped. #[impl_trait_for_tuples::impl_for_tuples(30)] -pub trait OnReapAccount { +pub trait OnKilledAccount { /// The account with the given id was reaped. - fn on_reap_account(who: &AccountId); + fn on_killed_account(who: &AccountId); } /// A trait for finding the author of a block header based on the `PreRuntime` digests contained @@ -734,7 +734,8 @@ pub trait VestingSchedule { type Currency: Currency; /// Get the amount that is currently being vested and cannot be transferred out of this account. - fn vesting_balance(who: &AccountId) -> >::Balance; + /// Returns `None` if the account has no vesting schedule. + fn vesting_balance(who: &AccountId) -> Option<>::Balance>; /// Adds a vesting schedule to a given account. /// @@ -807,6 +808,8 @@ impl WithdrawReasons { pub trait ChangeMembers { /// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The /// new set is given by `new`, and need not be sorted. + /// + /// This resets any previous value of prime. fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec) { new.sort_unstable(); Self::change_members_sorted(incoming, outgoing, &new[..]); @@ -816,6 +819,8 @@ pub trait ChangeMembers { /// new set is thus given by `sorted_new` and **must be sorted**. /// /// NOTE: This is the only function that needs to be implemented in `ChangeMembers`. + /// + /// This resets any previous value of prime. fn change_members_sorted( incoming: &[AccountId], outgoing: &[AccountId], @@ -824,6 +829,8 @@ pub trait ChangeMembers { /// Set the new members; they **must already be sorted**. This will compute the diff and use it to /// call `change_members_sorted`. + /// + /// This resets any previous value of prime. fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) { let (incoming, outgoing) = Self::compute_members_diff(new_members, old_members); Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); @@ -864,14 +871,20 @@ pub trait ChangeMembers { } (incoming, outgoing) } + + /// Set the prime member. + fn set_prime(_prime: Option) {} } impl ChangeMembers for () { fn change_members(_: &[T], _: &[T], _: Vec) {} fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {} fn set_members_sorted(_: &[T], _: &[T]) {} + fn set_prime(_: Option) {} } + + /// Trait for type that can handle the initialization of account IDs at genesis. pub trait InitializeMembers { /// Initialize the members to the given `members`. diff --git a/frame/support/src/unsigned.rs b/frame/support/src/unsigned.rs index 319fa3adb448f1b359be522e2ac58e7ba62d9402..4289e4e474f53971f29f75208d9cf865b6e173a2 100644 --- a/frame/support/src/unsigned.rs +++ b/frame/support/src/unsigned.rs @@ -15,7 +15,6 @@ // along with Substrate. If not, see . #[doc(hidden)] -#[allow(deprecated)] pub use crate::sp_runtime::traits::ValidateUnsigned; #[doc(hidden)] pub use crate::sp_runtime::transaction_validity::{ @@ -66,7 +65,6 @@ macro_rules! 
impl_outer_validate_unsigned { $( $module:ident )* } ) => { - #[allow(deprecated)] // Allow ValidateUnsigned impl $crate::unsigned::ValidateUnsigned for $runtime { type Call = Call; @@ -109,7 +107,6 @@ mod test_partial_and_full_call { pub mod timestamp { pub struct Module; - #[allow(deprecated)] // Allow ValidateUnsigned impl super::super::ValidateUnsigned for Module { type Call = Call; diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index c46cca683ba934172c08a3b5437d1a7e9457d077..8926ed949302b304935a5df7431eae09a886eb57 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -67,9 +67,11 @@ pub trait ClassifyDispatch { fn classify_dispatch(&self, target: T) -> DispatchClass; } -/// Means of determining the weight of a block's life cycle hooks: `on_initialize`, `on_finalize` and -/// such. +/// Means of determining the weight of a block's life cycle hooks: `on_initialize`, `on_finalize`, +/// `on_runtime_upgrade`, and such. pub trait WeighBlock { + /// Return the weight of the block's on_runtime_upgrade hook. + fn on_runtime_upgrade() -> Weight { Zero::zero() } /// Return the weight of the block's on_initialize hook. fn on_initialize(_: BlockNumber) -> Weight { Zero::zero() } /// Return the weight of the block's on_finalize hook. @@ -87,6 +89,14 @@ pub trait PaysFee { /// Maybe I can do something to remove the duplicate code here. #[impl_for_tuples(30)] impl WeighBlock for SingleModule { + fn on_runtime_upgrade() -> Weight { + let mut accumulated_weight: Weight = Zero::zero(); + for_tuples!( + #( accumulated_weight = accumulated_weight.saturating_add(SingleModule::on_runtime_upgrade()); )* + ); + accumulated_weight + } + fn on_initialize(n: BlockNumber) -> Weight { let mut accumulated_weight: Weight = Zero::zero(); for_tuples!( diff --git a/frame/support/test/Cargo.toml b/frame/support/test/Cargo.toml index 71d3893426f0646ba14cca957c9a6e9915ee57b0..0a5595914b07b28ea416767753349ec253293dae 100644 --- a/frame/support/test/Cargo.toml +++ b/frame/support/test/Cargo.toml @@ -1,19 +1,22 @@ [package] name = "frame-support-test" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] serde = { version = "1.0.101", default-features = false, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-io ={ path = "../../../primitives/io", default-features = false } -sp-state-machine = { version = "0.8", optional = true, path = "../../../primitives/state-machine" } -frame-support = { version = "2.0.0", default-features = false, path = "../" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../../primitives/inherents" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../primitives/runtime" } -sp-core = { version = "2.0.0", default-features = false, path = "../../../primitives/core" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-io ={ path = "../../../primitives/io", default-features = false , version = "2.0.0-alpha.2"} +sp-state-machine = { version = "0.8.0-alpha.2", optional = true, path = "../../../primitives/state-machine" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../" } +sp-inherents = { version = "2.0.0-alpha.2", default-features 
= false, path = "../../../primitives/inherents" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/runtime" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../../primitives/core" } trybuild = "1.0.17" pretty_assertions = "0.6.1" diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.rs b/frame/support/test/tests/reserved_keyword/on_initialize.rs index e389529bca57ecbafa4c13bcae9f53dce03c2a1e..84feb2d93f36cfc2212f44a7fa646d42501355b8 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.rs +++ b/frame/support/test/tests/reserved_keyword/on_initialize.rs @@ -27,6 +27,6 @@ macro_rules! reserved { } } -reserved!(on_finalize on_initialize on_finalise on_initialise offchain_worker deposit_event); +reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); fn main() {} diff --git a/frame/support/test/tests/reserved_keyword/on_initialize.stderr b/frame/support/test/tests/reserved_keyword/on_initialize.stderr index 13c2ef8d2c8691b7a6a576612683e9c3f963671a..d20a6e11451e7d52f4ded9a810ddc81f0d58bbb8 100644 --- a/frame/support/test/tests/reserved_keyword/on_initialize.stderr +++ b/frame/support/test/tests/reserved_keyword/on_initialize.stderr @@ -1,47 +1,39 @@ error: Invalid call fn name: `on_finalize`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. --> $DIR/on_initialize.rs:30:1 | -30 | reserved!(on_finalize on_initialize on_finalise on_initialise offchain_worker deposit_event); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation +30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) error: Invalid call fn name: `on_initialize`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. --> $DIR/on_initialize.rs:30:1 | -30 | reserved!(on_finalize on_initialize on_finalise on_initialise offchain_worker deposit_event); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation +30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) -error: `on_finalise` was renamed to `on_finalize`. Please rename your function accordingly. +error: Invalid call fn name: `on_runtime_upgrade`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. 
--> $DIR/on_initialize.rs:30:1 | -30 | reserved!(on_finalize on_initialize on_finalise on_initialise offchain_worker deposit_event); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation - | - = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) - -error: `on_initialise` was renamed to `on_initialize`. Please rename your function accordingly. - --> $DIR/on_initialize.rs:30:1 - | -30 | reserved!(on_finalize on_initialize on_finalise on_initialise offchain_worker deposit_event); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation +30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) error: Invalid call fn name: `offchain_worker`, name is reserved and doesn't match expected signature, please refer to `decl_module!` documentation to see the appropriate usage, or rename it to an unreserved keyword. --> $DIR/on_initialize.rs:30:1 | -30 | reserved!(on_finalize on_initialize on_finalise on_initialise offchain_worker deposit_event); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation +30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) error: `deposit_event` function is reserved and must follow the syntax: `$vis:vis fn deposit_event() = default;` --> $DIR/on_initialize.rs:30:1 | -30 | reserved!(on_finalize on_initialize on_finalise on_initialise offchain_worker deposit_event); - | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation +30 | reserved!(on_finalize on_initialize on_runtime_upgrade offchain_worker deposit_event); + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ in this macro invocation | = note: this error originates in a macro outside of the current crate (in Nightly builds, run with -Z external-macro-backtrace for more info) diff --git a/frame/system/Cargo.toml b/frame/system/Cargo.toml index 631d8eecb4715da13851e1323839f415abf7cf17..4e350be1a90c78b8a18c4da1a77f6d9e4c54f905 100644 --- a/frame/system/Cargo.toml +++ b/frame/system/Cargo.toml @@ -1,25 +1,28 @@ [package] name = "frame-system" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME system module" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = 
"../../primitives/std" } -sp-io ={ path = "../../primitives/io", default-features = false } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { path = "../../primitives/io", default-features = false, version = "2.0.0-alpha.2" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/version" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] criterion = "0.2.11" -sp-externalities = { version = "0.8.0", path = "../../primitives/externalities" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../test-utils/runtime/client" } +sp-externalities = { version = "0.8.0-alpha.2", path = "../../primitives/externalities" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../test-utils/runtime/client" } [features] default = ["std"] @@ -33,6 +36,7 @@ std = [ "sp-runtime/std", "sp-version/std", ] +runtime-benchmarks = [] [[bench]] name = "bench" diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 1e24cb2c0597425a5540646d782f4767cf783b5f..cfcaa6f64ac59ba96462bbe581b65559565d4b85 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -78,7 +78,7 @@ impl system::Trait for Runtime { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl module::Trait for Runtime { diff --git a/frame/system/rpc/runtime-api/Cargo.toml b/frame/system/rpc/runtime-api/Cargo.toml index 62440323768886eea4252e3a081807543dbdc48f..3bcc34698a56efcb82496a076bf9c507acd5d31d 100644 --- a/frame/system/rpc/runtime-api/Cargo.toml +++ b/frame/system/rpc/runtime-api/Cargo.toml @@ -1,13 +1,16 @@ [package] name = "frame-system-rpc-runtime-api" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Runtime API definition required by System RPC extensions." 
[dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } [features] default = ["std"] diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 875a1d7cfe293d38006f8d2707d506bdf6e67b42..f1286beac4e7b9fa68e95c0301612086ddb62867 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -93,6 +93,7 @@ use serde::Serialize; use sp_std::prelude::*; #[cfg(any(feature = "std", test))] use sp_std::map; +use sp_std::convert::Infallible; use sp_std::marker::PhantomData; use sp_std::fmt::Debug; use sp_version::RuntimeVersion; @@ -112,12 +113,12 @@ use sp_runtime::{ use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; use frame_support::{ - decl_module, decl_event, decl_storage, decl_error, storage, Parameter, + decl_module, decl_event, decl_storage, decl_error, storage, Parameter, ensure, debug, traits::{ - Contains, Get, ModuleToIndex, OnNewAccount, OnReapAccount, IsDeadAccount, Happened, + Contains, Get, ModuleToIndex, OnNewAccount, OnKilledAccount, IsDeadAccount, Happened, StoredMap }, - weights::{Weight, DispatchInfo, DispatchClass, SimpleDispatchInfo}, + weights::{Weight, DispatchInfo, DispatchClass, SimpleDispatchInfo, FunctionOf}, }; use codec::{Encode, Decode, FullCodec, EncodeLike}; @@ -219,7 +220,7 @@ pub trait Trait: 'static + Eq + Clone { /// A function that is invoked when an account has been determined to be dead. /// /// All resources should be cleaned up associated with the given account. - type OnReapAccount: OnReapAccount; + type OnKilledAccount: OnKilledAccount; } pub type DigestOf = generic::Digest<::Hash>; @@ -290,13 +291,29 @@ fn hash69 + Default>() -> T { /// which can't contain more than `u32::max_value()` items. type EventIndex = u32; +/// Type used to encode the number of references an account has. +pub type RefCount = u8; + +/// Information of an account. +#[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] +pub struct AccountInfo { + /// The number of transactions this account has sent. + pub nonce: Index, + /// The number of other modules that currently depend on this account's existence. The account + /// cannot be reaped until this is zero. + pub refcount: RefCount, + /// The additional data that belongs to this account. Used to store the balance(s) in a lot of + /// chains. + pub data: AccountData, +} + decl_storage! { trait Store for Module as System { /// The full account information for a particular account ID. // TODO: should be hasher(twox64_concat) - will need staged migration - // TODO: should not including T::Index (the nonce) // https://github.com/paritytech/substrate/issues/4917 - pub Account get(fn account): map hasher(blake2_256) T::AccountId => (T::Index, T::AccountData); + pub Account get(fn account): + map hasher(blake2_256) T::AccountId => AccountInfo; /// Total extrinsics count for the current block. ExtrinsicCount: Option; @@ -350,6 +367,9 @@ decl_storage! { /// the `EventIndex` then in case if the topic has the same contents on the next block /// no notification will be triggered thus the event might be lost. 
EventTopics get(fn event_topics): map hasher(blake2_256) T::Hash => Vec<(T::BlockNumber, EventIndex)>; + + /// A bool to track if the runtime was upgraded last block. + pub RuntimeUpgraded: bool; } add_extra_genesis { config(changes_trie_config): Option; @@ -384,7 +404,7 @@ decl_event!( /// A new account was created. NewAccount(AccountId), /// An account was reaped. - ReapedAccount(AccountId), + KilledAccount(AccountId), } ); @@ -407,6 +427,11 @@ decl_error! { /// /// Either calling `Core_version` or decoding `RuntimeVersion` failed. FailedToExtractRuntimeVersion, + + /// Suicide called when the account has non-default composite data. + NonDefaultComposite, + /// There is a non-zero reference count preventing the account from being purged. + NonZeroRefCount } } @@ -414,11 +439,15 @@ decl_module! { pub struct Module for enum Call where origin: T::Origin { type Error = Error; - /// A big dispatch that will disallow any other transaction to be included. + /// A dispatch that will fill the block weight up to the given ratio. // TODO: This should only be available for testing, rather than in general usage, but // that's not possible at present (since it's within the decl_module macro). - #[weight = SimpleDispatchInfo::MaxOperational] - fn fill_block(origin) { + #[weight = FunctionOf( + |(ratio,): (&Perbill,)| *ratio * T::MaximumBlockWeight::get(), + DispatchClass::Operational, + true, + )] + fn fill_block(origin, _ratio: Perbill) { ensure_root(origin)?; } @@ -460,6 +489,7 @@ decl_module! { } storage::unhashed::put_raw(well_known_keys::CODE, &code); + RuntimeUpgraded::put(true); Self::deposit_event(RawEvent::CodeUpdated); } @@ -468,6 +498,7 @@ decl_module! { pub fn set_code_without_checks(origin, code: Vec) { ensure_root(origin)?; storage::unhashed::put_raw(well_known_keys::CODE, &code); + RuntimeUpgraded::put(true); Self::deposit_event(RawEvent::CodeUpdated); } @@ -495,6 +526,9 @@ decl_module! { ensure_root(origin)?; for i in &items { storage::unhashed::put_raw(&i.0, &i.1); + if i.0 == well_known_keys::CODE { + RuntimeUpgraded::put(true); + } } } @@ -513,6 +547,17 @@ decl_module! { ensure_root(origin)?; storage::unhashed::kill_prefix(&prefix); } + + /// Kill the sending account, assuming there are no references outstanding and the composite + /// data is equal to its default value. + #[weight = SimpleDispatchInfo::FixedOperational(25_000)] + fn suicide(origin) { + let who = ensure_signed(origin)?; + let account = Account::::get(&who); + ensure!(account.refcount == 0, Error::::NonZeroRefCount); + ensure!(account.data == T::AccountData::default(), Error::::NonDefaultComposite); + Account::::remove(who); + } } } @@ -633,13 +678,40 @@ impl Default for InitKind { } } +/// Reference status; can be either referenced or unreferenced. +pub enum RefStatus { + Referenced, + Unreferenced, +} + impl Module { /// Deposits an event into this block's event record. pub fn deposit_event(event: impl Into) { Self::deposit_event_indexed(&[], event.into()); } - /// Deposits an event into this block's event record adding this event + /// Increment the reference counter on an account. + pub fn inc_ref(who: &T::AccountId) { + Account::::mutate(who, |a| a.refcount = a.refcount.saturating_add(1)); + } + + /// Decrement the reference counter on an account. This *MUST* only be done once for every time + /// you called `inc_ref` on `who`. + pub fn dec_ref(who: &T::AccountId) { + Account::::mutate(who, |a| a.refcount = a.refcount.saturating_sub(1)); + } + + /// The number of outstanding references for the account `who`. 
+ pub fn refs(who: &T::AccountId) -> RefCount { + Account::::get(who).refcount + } + + /// True if the account has no outstanding references. + pub fn allow_death(who: &T::AccountId) -> bool { + Account::::get(who).refcount == 0 + } + + /// Deposits an event into this block's event record adding this event /// to the corresponding topic indexes. /// /// This will update storage entries that correspond to the specified topics. @@ -821,7 +893,7 @@ impl Module { /// Set the block number to something in particular. Can be used as an alternative to /// `initialize` for tests that don't need to bother with the other environment entries. - #[cfg(any(feature = "std", test))] + #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] pub fn set_block_number(n: T::BlockNumber) { >::put(n); } @@ -851,12 +923,12 @@ impl Module { /// Retrieve the account transaction counter from storage. pub fn account_nonce(who: impl EncodeLike) -> T::Index { - Account::::get(who).0 + Account::::get(who).nonce } /// Increment a particular account's nonce by 1. pub fn inc_account_nonce(who: impl EncodeLike) { - Account::::mutate(who, |a| a.0 += T::Index::one()); + Account::::mutate(who, |a| a.nonce += T::Index::one()); } /// Note what the extrinsic data of the current extrinsic index is. If this @@ -908,18 +980,28 @@ impl Module { Self::deposit_event(RawEvent::NewAccount(who)); } - /// Kill the account and reap any related information. - pub fn kill_account(who: T::AccountId) { - if Account::::contains_key(&who) { - Account::::remove(&who); - Self::on_killed_account(who); - } - } - /// Do anything that needs to be done after an account has been killed. fn on_killed_account(who: T::AccountId) { - T::OnReapAccount::on_reap_account(&who); - Self::deposit_event(RawEvent::ReapedAccount(who)); + T::OnKilledAccount::on_killed_account(&who); + Self::deposit_event(RawEvent::KilledAccount(who)); + } + + /// Remove an account from storage. This should only be done when its refs are zero or you'll + /// get storage leaks in other modules. Nonetheless we assume that the calling logic knows best. + /// + /// This is a no-op if the account doesn't already exist. If it does then it will ensure + /// cleanups (those in `on_killed_account`) take place. + fn kill_account(who: &T::AccountId) { + if Account::::contains_key(who) { + let account = Account::::take(who); + if account.refcount > 0 { + debug::debug!( + target: "system", + "WARNING: Referenced account deleted. This is probably a bug." + ); + } + Module::::on_killed_account(who.clone()); + } } } @@ -935,7 +1017,7 @@ impl Happened for CallOnCreatedAccount { pub struct CallKillAccount(PhantomData); impl Happened for CallKillAccount { fn happened(who: &T::AccountId) { - Module::::kill_account(who.clone()); + Module::::kill_account(who) } } @@ -944,53 +1026,45 @@ impl Happened for CallKillAccount { // Anything more complex will need more sophisticated logic. 
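// Editor's aside, not part of the patch: a minimal usage sketch of the reference-counting
// helpers introduced above. The free functions `note_dependency`, `clear_dependency` and
// `can_reap` are hypothetical; `inc_ref`, `dec_ref` and `allow_death` are the APIs added
// by this change.
fn note_dependency<T: frame_system::Trait>(who: &T::AccountId) {
    // Prevent `who` from being reaped while this module still references it.
    frame_system::Module::<T>::inc_ref(who);
}

fn clear_dependency<T: frame_system::Trait>(who: &T::AccountId) {
    // Must be called exactly once for every earlier `inc_ref`, as documented above.
    frame_system::Module::<T>::dec_ref(who);
}

fn can_reap<T: frame_system::Trait>(who: &T::AccountId) -> bool {
    // True once no module holds an outstanding reference to the account.
    frame_system::Module::<T>::allow_death(who)
}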
impl StoredMap for Module { fn get(k: &T::AccountId) -> T::AccountData { - Account::::get(k).1 + Account::::get(k).data } fn is_explicit(k: &T::AccountId) -> bool { Account::::contains_key(k) } - fn insert(k: &T::AccountId, t: T::AccountData) { + fn insert(k: &T::AccountId, data: T::AccountData) { let existed = Account::::contains_key(k); - Account::::insert(k, (T::Index::default(), t)); + Account::::mutate(k, |a| a.data = data); if !existed { Self::on_created_account(k.clone()); } } fn remove(k: &T::AccountId) { - if Account::::contains_key(&k) { - Self::kill_account(k.clone()); - } + Self::kill_account(k) } fn mutate(k: &T::AccountId, f: impl FnOnce(&mut T::AccountData) -> R) -> R { let existed = Account::::contains_key(k); - let r = Account::::mutate(k, |a| f(&mut a.1)); + let r = Account::::mutate(k, |a| f(&mut a.data)); if !existed { Self::on_created_account(k.clone()); } r } fn mutate_exists(k: &T::AccountId, f: impl FnOnce(&mut Option) -> R) -> R { - let (existed, exists, r) = Account::::mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let (maybe_nonce, mut maybe_extra) = split_inner(maybe_value.take(), |v| v); - let r = f(&mut maybe_extra); - *maybe_value = maybe_extra.map(|extra| (maybe_nonce.unwrap_or_default(), extra)); - (existed, maybe_value.is_some(), r) - }); - if !existed && exists { - Self::on_created_account(k.clone()); - } else if existed && !exists { - Self::on_killed_account(k.clone()); - } - r + Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }).expect("Infallible; qed") } fn try_mutate_exists(k: &T::AccountId, f: impl FnOnce(&mut Option) -> Result) -> Result { Account::::try_mutate_exists(k, |maybe_value| { let existed = maybe_value.is_some(); - let (maybe_nonce, mut maybe_extra) = split_inner(maybe_value.take(), |v| v); - f(&mut maybe_extra).map(|v| { - *maybe_value = maybe_extra.map(|extra| (maybe_nonce.unwrap_or_default(), extra)); - (existed, maybe_value.is_some(), v) + let (maybe_prefix, mut maybe_data) = split_inner( + maybe_value.take(), + |account| ((account.nonce, account.refcount), account.data) + ); + f(&mut maybe_data).map(|result| { + *maybe_value = maybe_data.map(|data| { + let (nonce, refcount) = maybe_prefix.unwrap_or_default(); + AccountInfo { nonce, refcount, data } + }); + (existed, maybe_value.is_some(), result) }) }).map(|(existed, exists, v)| { if !existed && exists { @@ -1080,6 +1154,34 @@ impl CheckWeight { pub fn new() -> Self { Self(PhantomData) } + + /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. + /// + /// It checks and notes the new weight and length. + fn do_pre_dispatch( + info: ::DispatchInfo, + len: usize, + ) -> Result<(), TransactionValidityError> { + let next_len = Self::check_block_length(info, len)?; + let next_weight = Self::check_weight(info)?; + AllExtrinsicsLen::put(next_len); + AllExtrinsicsWeight::put(next_weight); + Ok(()) + } + + /// Do the validate checks. This can be applied to both signed and unsigned. + /// + /// It only checks that the block weight and length limit will not exceed. + fn do_validate( + info: ::DispatchInfo, + len: usize, + ) -> TransactionValidity { + // ignore the next weight and length. If they return `Ok`, then it is below the limit. 
+ let _ = Self::check_block_length(info, len)?; + let _ = Self::check_weight(info)?; + + Ok(ValidTransaction { priority: Self::get_priority(info), ..Default::default() }) + } } impl SignedExtension for CheckWeight { @@ -1099,11 +1201,7 @@ impl SignedExtension for CheckWeight { info: Self::DispatchInfo, len: usize, ) -> Result<(), TransactionValidityError> { - let next_len = Self::check_block_length(info, len)?; - AllExtrinsicsLen::put(next_len); - let next_weight = Self::check_weight(info)?; - AllExtrinsicsWeight::put(next_weight); - Ok(()) + Self::do_pre_dispatch(info, len) } fn validate( @@ -1113,18 +1211,23 @@ impl SignedExtension for CheckWeight { info: Self::DispatchInfo, len: usize, ) -> TransactionValidity { - // There is no point in writing to storage here since changes are discarded. This basically - // discards any transaction which is bigger than the length or weight limit **alone**, which - // is a guarantee that it will fail in the pre-dispatch phase. - if let Err(e) = Self::check_block_length(info, len) { - return Err(e); - } + Self::do_validate(info, len) + } - if let Err(e) = Self::check_weight(info) { - return Err(e); - } + fn pre_dispatch_unsigned( + _call: &Self::Call, + info: Self::DispatchInfo, + len: usize, + ) -> Result<(), TransactionValidityError> { + Self::do_pre_dispatch(info, len) + } - Ok(ValidTransaction { priority: Self::get_priority(info), ..Default::default() }) + fn validate_unsigned( + _call: &Self::Call, + info: Self::DispatchInfo, + len: usize, + ) -> TransactionValidity { + Self::do_validate(info, len) } } @@ -1180,17 +1283,18 @@ impl SignedExtension for CheckNonce { _info: Self::DispatchInfo, _len: usize, ) -> Result<(), TransactionValidityError> { - let (expected, extra) = Account::::get(who); - if self.0 != expected { + let mut account = Account::::get(who); + if self.0 != account.nonce { return Err( - if self.0 < expected { + if self.0 < account.nonce { InvalidTransaction::Stale } else { InvalidTransaction::Future }.into() ) } - Account::::insert(who, (expected + T::Index::one(), extra)); + account.nonce += T::Index::one(); + Account::::insert(who, account); Ok(()) } @@ -1202,13 +1306,13 @@ impl SignedExtension for CheckNonce { _len: usize, ) -> TransactionValidity { // check index - let (expected, _extra) = Account::::get(who); - if self.0 < expected { + let account = Account::::get(who); + if self.0 < account.nonce { return InvalidTransaction::Stale.into() } let provides = vec![Encode::encode(&(who, self.0))]; - let requires = if expected < self.0 { + let requires = if account.nonce < self.0 { vec![Encode::encode(&(who, self.0 - One::one()))] } else { vec![] @@ -1378,6 +1482,7 @@ impl Lookup for ChainContext { #[cfg(test)] mod tests { use super::*; + use sp_std::cell::RefCell; use sp_core::H256; use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header, DispatchError}; use frame_support::{impl_outer_origin, parameter_types}; @@ -1404,6 +1509,15 @@ mod tests { }; } + thread_local!{ + pub static KILLED: RefCell> = RefCell::new(vec![]); + } + + pub struct RecordKilled; + impl OnKilledAccount for RecordKilled { + fn on_killed_account(who: &u64) { KILLED.with(|r| r.borrow_mut().push(*who)) } + } + impl Trait for Test { type Origin = Origin; type Call = (); @@ -1421,9 +1535,9 @@ mod tests { type MaximumBlockLength = MaximumBlockLength; type Version = Version; type ModuleToIndex = (); - type AccountData = (); + type AccountData = u32; type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = RecordKilled; } impl From> for 
u16 { @@ -1460,6 +1574,27 @@ mod tests { assert_eq!(x, Ok(RawOrigin::::Signed(1u64))); } + #[test] + fn stored_map_works() { + new_test_ext().execute_with(|| { + System::insert(&0, 42); + assert!(System::allow_death(&0)); + + System::inc_ref(&0); + assert!(!System::allow_death(&0)); + + System::insert(&0, 69); + assert!(!System::allow_death(&0)); + + System::dec_ref(&0); + assert!(System::allow_death(&0)); + + assert!(KILLED.with(|r| r.borrow().is_empty())); + System::kill_account(&0); + assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); + }); + } + #[test] fn deposit_event_should_work() { new_test_ext().execute_with(|| { @@ -1612,7 +1747,7 @@ mod tests { #[test] fn signed_ext_check_nonce_works() { new_test_ext().execute_with(|| { - Account::::insert(1, (1, ())); + Account::::insert(1, AccountInfo { nonce: 1, refcount: 0, data: 0 }); let info = DispatchInfo::default(); let len = 0_usize; // stale @@ -1844,6 +1979,26 @@ mod tests { System::events(), vec![EventRecord { phase: Phase::ApplyExtrinsic(0), event: 102u16, topics: vec![] }], ); + + assert_eq!(RuntimeUpgraded::get(), true); + }); + } + + #[test] + fn runtime_upgraded_with_set_storage() { + let executor = substrate_test_runtime_client::new_native_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); + ext.execute_with(|| { + System::set_storage( + RawOrigin::Root.into(), + vec![( + well_known_keys::CODE.to_vec(), + substrate_test_runtime_client::runtime::WASM_BINARY.to_vec() + )], + ).unwrap(); + + assert_eq!(RuntimeUpgraded::get(), true); }); } } diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index 507092f626dcfdb5e77f52a344a77d6c5508cbb7..5abafe7655172c8b41d81a197d9510730df83f68 100644 --- a/frame/system/src/offchain.rs +++ b/frame/system/src/offchain.rs @@ -128,19 +128,20 @@ pub trait SignAndSubmitTransaction { fn sign_and_submit(call: impl Into, public: PublicOf) -> Result<(), ()> { let call = call.into(); let id = public.clone().into_account(); - let (expected_nonce, extra) = super::Account::::get(&id); + let mut account = super::Account::::get(&id); debug::native::debug!( target: "offchain", "Creating signed transaction from account: {:?} (nonce: {:?})", id, - expected_nonce, + account.nonce, ); let (call, signature_data) = Self::CreateTransaction - ::create_transaction::(call, public, id.clone(), expected_nonce) + ::create_transaction::(call, public, id.clone(), account.nonce) .ok_or(())?; // increment the nonce. This is fine, since the code should always // be running in off-chain context, so we NEVER persists data. 
- super::Account::::insert(&id, (expected_nonce + One::one(), extra)); + account.nonce += One::one(); + super::Account::::insert(&id, account); let xt = Self::Extrinsic::new(call, Some(signature_data)).ok_or(())?; sp_io::offchain::submit_transaction(xt.encode()) diff --git a/frame/timestamp/Cargo.toml b/frame/timestamp/Cargo.toml index 107374799d36d4e026f17b81090c110d3b152395..ff5b72de670e6cfd364c008e82ff3b0ec168be2d 100644 --- a/frame/timestamp/Cargo.toml +++ b/frame/timestamp/Cargo.toml @@ -1,26 +1,31 @@ [package] name = "pallet-timestamp" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME Timestamp Module" +documentation = "https://docs.rs/pallet-timestamp" + [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -frame-benchmarking = { version = "2.0.0", default-features = false, path = "../benchmarking" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../primitives/timestamp" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io", optional = true } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +frame-benchmarking = { version = "2.0.0-alpha.2", default-features = false, path = "../benchmarking", optional = true } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/timestamp" } impl-trait-for-tuples = "0.1.3" [dev-dependencies] -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } [features] default = ["std"] @@ -35,3 +40,4 @@ std = [ "frame-system/std", "sp-timestamp/std" ] +runtime-benchmarks = ["frame-benchmarking", "sp-io"] diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index 9310d39dfdec291f8eff4a90403f55e3bf4e9a33..5f69cdbe7e0463d0698b2e9fc12df536c51dc9e9 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -21,88 +21,16 @@ use super::*; use sp_std::prelude::*; use frame_system::RawOrigin; -use 
frame_benchmarking::{ - BenchmarkResults, BenchmarkParameter, selected_benchmark, benchmarking, - Benchmarking, BenchmarkingSetup, -}; -use sp_runtime::traits::Dispatchable; +use frame_benchmarking::benchmarks; -/// Benchmark `set` extrinsic. -struct Set; -impl BenchmarkingSetup, RawOrigin> for Set { - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)> { - vec![ - // Current time ("Now") - (BenchmarkParameter::N, 1, 100), - ] - } - - fn instance(&self, components: &[(BenchmarkParameter, u32)]) - -> Result<(Call, RawOrigin), &'static str> - { - let user_origin = RawOrigin::None; - let now = components.iter().find(|&c| c.0 == BenchmarkParameter::N).unwrap().1; +const MAX_TIME: u32 = 100; - // Return the `set` call - Ok((Call::::set(now.into()), user_origin)) +benchmarks! { + _ { + let n in 1 .. MAX_TIME => (); } -} - -selected_benchmark!(Set); - -impl Benchmarking for Module { - fn run_benchmark(extrinsic: Vec, steps: u32, repeat: u32) -> Result, &'static str> { - // Map the input to the selected benchmark. - let selected_benchmark = match extrinsic.as_slice() { - b"set" => SelectedBenchmark::Set, - _ => return Err("Could not find extrinsic."), - }; - - // Warm up the DB - benchmarking::commit_db(); - benchmarking::wipe_db(); - let components = , RawOrigin>>::components(&selected_benchmark); - let mut results: Vec = Vec::new(); - - // Select the component we will be benchmarking. Each component will be benchmarked. - for (name, low, high) in components.iter() { - // Create up to `STEPS` steps for that component between high and low. - let step_size = ((high - low) / steps).max(1); - let num_of_steps = (high - low) / step_size; - for s in 0..num_of_steps { - // This is the value we will be testing for component `name` - let component_value = low + step_size * s; - - // Select the mid value for all the other components. - let c: Vec<(BenchmarkParameter, u32)> = components.iter() - .map(|(n, l, h)| - (*n, if n == name { component_value } else { (h - l) / 2 + l }) - ).collect(); - - // Run the benchmark `repeat` times. - for _ in 0..repeat { - // Set up the externalities environment for the setup we want to benchmark. - let (call, caller) = , - RawOrigin, - >>::instance(&selected_benchmark, &c)?; - // Commit the externalities to the database, flushing the DB cache. - // This will enable worst case scenario for reading from the database. - benchmarking::commit_db(); - // Run the benchmark. - let start = benchmarking::current_time(); - call.dispatch(caller.into())?; - let finish = benchmarking::current_time(); - let elapsed = finish - start; - results.push((c.clone(), elapsed)); - // Wipe the DB back to the genesis state. - benchmarking::wipe_db(); - } - } - } - - return Ok(results); - } + set { + let n in ...; + }: _(RawOrigin::None, n.into()) } diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index de36b65851be35305d064673644aaebe4f35691c..2a37dfdddb62a89fc7a3aea9f2ce9ef8880421a7 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -90,6 +90,7 @@ #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "runtime-benchmarks")] mod benchmarking; use sp_std::{result, cmp}; @@ -278,7 +279,7 @@ mod tests { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } parameter_types! 
{ pub const MinimumPeriod: u64 = 5; diff --git a/frame/transaction-payment/Cargo.toml b/frame/transaction-payment/Cargo.toml index ac3dbb2c2b16a25f6d13489952e526a2ce237ac6..3a291dc51631288b43415039c4c1e831800c2d84 100644 --- a/frame/transaction-payment/Cargo.toml +++ b/frame/transaction-payment/Cargo.toml @@ -1,22 +1,25 @@ [package] name = "pallet-transaction-payment" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to manage transaction payments" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "./rpc/runtime-api" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-alpha.2", default-features = false, path = "./rpc/runtime-api" } [dev-dependencies] -sp-io = { version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-io = { version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } [features] default = ["std"] diff --git a/frame/transaction-payment/rpc/Cargo.toml b/frame/transaction-payment/rpc/Cargo.toml index 5fb5f5e2745d6f553810ee3525210d05b23e2192..309dfeedd5be420bb84615c1e0701ef74b5a58e4 100644 --- a/frame/transaction-payment/rpc/Cargo.toml +++ b/frame/transaction-payment/rpc/Cargo.toml @@ -1,19 +1,22 @@ [package] name = "pallet-transaction-payment-rpc" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "RPC interface for the transaction payment module." 
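# Editor's aside, not part of the patch: after the `query_info` change later in this diff,
# the `partial_fee` reported through this RPC crate is computed via
# `ChargeTransactionPayment::<T>::compute_fee(len, dispatch_info, 0u32.into())`, i.e. with a
# zero tip: roughly a base fee, plus a per-byte length fee, plus a weight fee whose weight is
# capped at `MaximumBlockWeight`, adjusted by the current `NextFeeMultiplier`. Any tip the
# sender attaches comes on top of this estimate.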
[dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0" } +codec = { package = "parity-scale-codec", version = "1.2.0" } jsonrpc-core = "14.0.3" jsonrpc-core-client = "14.0.3" jsonrpc-derive = "14.0.3" -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -sp-rpc = { version = "2.0.0", path = "../../../primitives/rpc" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +sp-rpc = { version = "2.0.0-alpha.2", path = "../../../primitives/rpc" } serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0", path = "./runtime-api" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +pallet-transaction-payment-rpc-runtime-api = { version = "2.0.0-alpha.2", path = "./runtime-api" } diff --git a/frame/transaction-payment/rpc/runtime-api/Cargo.toml b/frame/transaction-payment/rpc/runtime-api/Cargo.toml index 8ee16a90dc3fcf08a90b824639ecac0d3cfb857d..96716769a39a89802544498acaabf52bab2c701c 100644 --- a/frame/transaction-payment/rpc/runtime-api/Cargo.toml +++ b/frame/transaction-payment/rpc/runtime-api/Cargo.toml @@ -1,17 +1,20 @@ [package] name = "pallet-transaction-payment-rpc-runtime-api" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "RPC runtime API for transaction payment FRAME pallet" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../../../../primitives/api" } -codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../../../support" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/api" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../../support" } [dev-dependencies] serde_json = "1.0.41" diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 0d9cfc0b77683ff3016654a63ac9a51c1ad22eab..3de0f944debceab8e269c6623310557deef741a2 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -119,7 +119,8 @@ impl Module { { let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); - let partial_fee = >::compute_fee(len, dispatch_info, 0u32.into()); + let partial_fee = + >::compute_fee(len, 
dispatch_info, 0u32.into()); let DispatchInfo { weight, class, .. } = dispatch_info; RuntimeDispatchInfo { weight, class, partial_fee } @@ -165,9 +166,10 @@ impl ChargeTransactionPayment { let len_fee = per_byte.saturating_mul(len); let weight_fee = { - // cap the weight to the maximum defined in runtime, otherwise it will be the `Bounded` - // maximum of its data type, which is not desired. - let capped_weight = info.weight.min(::MaximumBlockWeight::get()); + // cap the weight to the maximum defined in runtime, otherwise it will be the + // `Bounded` maximum of its data type, which is not desired. + let capped_weight = info.weight + .min(::MaximumBlockWeight::get()); T::WeightToFee::convert(capped_weight) }; @@ -248,20 +250,21 @@ mod tests { use super::*; use codec::Encode; use frame_support::{ - parameter_types, impl_outer_origin, impl_outer_dispatch, + impl_outer_dispatch, impl_outer_origin, parameter_types, weights::{DispatchClass, DispatchInfo, GetDispatchInfo, Weight}, }; + use pallet_balances::Call as BalancesCall; + use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; use sp_core::H256; use sp_runtime::{ - Perbill, testing::{Header, TestXt}, - traits::{BlakeTwo256, IdentityLookup, Extrinsic}, + traits::{BlakeTwo256, Extrinsic, IdentityLookup}, + Perbill, }; - use pallet_balances::Call as BalancesCall; - use sp_std::cell::RefCell; - use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; + use std::cell::RefCell; - const CALL: &::Call = &Call::Balances(BalancesCall::transfer(2, 69)); + const CALL: &::Call = + &Call::Balances(BalancesCall::transfer(2, 69)); impl_outer_dispatch! { pub enum Call for Runtime where origin: Origin { @@ -304,7 +307,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! { @@ -318,7 +321,7 @@ mod tests { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; } -thread_local! { + thread_local! { static TRANSACTION_BASE_FEE: RefCell = RefCell::new(0); static TRANSACTION_BYTE_FEE: RefCell = RefCell::new(1); static WEIGHT_TO_FEE: RefCell = RefCell::new(1); @@ -373,10 +376,16 @@ thread_local! { } impl ExtBuilder { - pub fn fees(mut self, base: u64, byte: u64, weight: u64) -> Self { - self.base_fee = base; - self.byte_fee = byte; - self.weight_to_fee = weight; + pub fn base_fee(mut self, base_fee: u64) -> Self { + self.base_fee = base_fee; + self + } + pub fn byte_fee(mut self, byte_fee: u64) -> Self { + self.byte_fee = byte_fee; + self + } + pub fn weight_fee(mut self, weight_to_fee: u64) -> Self { + self.weight_to_fee = weight_to_fee; self } pub fn balance_factor(mut self, factor: u64) -> Self { @@ -416,9 +425,9 @@ thread_local! { #[test] fn signed_extension_transaction_payment_work() { - ExtBuilder::default() - .balance_factor(10) // 100 - .fees(5, 1, 1) // 5 fixed, 1 per byte, 1 per weight + ExtBuilder::default() + .balance_factor(10) + .base_fee(5) .build() .execute_with(|| { @@ -441,9 +450,9 @@ thread_local! { #[test] fn signed_extension_transaction_payment_is_bounded() { - ExtBuilder::default() + ExtBuilder::default() .balance_factor(1000) - .fees(0, 0, 1) + .byte_fee(0) .build() .execute_with(|| { @@ -464,7 +473,7 @@ thread_local! { #[test] fn signed_extension_allows_free_transactions() { ExtBuilder::default() - .fees(100, 1, 1) + .base_fee(100) .balance_factor(0) .build() .execute_with(|| @@ -503,7 +512,7 @@ thread_local! 
{ #[test] fn signed_ext_length_fee_is_also_updated_per_congestion() { ExtBuilder::default() - .fees(5, 1, 1) + .base_fee(5) .balance_factor(10) .build() .execute_with(|| @@ -531,7 +540,8 @@ thread_local! { let ext = xt.encode(); let len = ext.len() as u32; ExtBuilder::default() - .fees(5, 1, 2) + .base_fee(5) + .weight_fee(2) .build() .execute_with(|| { @@ -558,10 +568,11 @@ thread_local! { #[test] fn compute_fee_works_without_multiplier() { ExtBuilder::default() - .fees(100, 10, 1) - .balance_factor(0) - .build() - .execute_with(|| + .base_fee(100) + .byte_fee(10) + .balance_factor(0) + .build() + .execute_with(|| { // Next fee multiplier is zero assert_eq!(NextFeeMultiplier::get(), Fixed64::from_natural(0)); @@ -597,10 +608,11 @@ thread_local! { #[test] fn compute_fee_works_with_multiplier() { ExtBuilder::default() - .fees(100, 10, 1) - .balance_factor(0) - .build() - .execute_with(|| + .base_fee(100) + .byte_fee(10) + .balance_factor(0) + .build() + .execute_with(|| { // Add a next fee multiplier NextFeeMultiplier::put(Fixed64::from_rational(1, 2)); // = 1/2 = .5 @@ -629,10 +641,11 @@ thread_local! { #[test] fn compute_fee_does_not_overflow() { ExtBuilder::default() - .fees(100, 10, 1) - .balance_factor(0) - .build() - .execute_with(|| + .base_fee(100) + .byte_fee(10) + .balance_factor(0) + .build() + .execute_with(|| { // Overflow is handled let dispatch_info = DispatchInfo { diff --git a/frame/treasury/Cargo.toml b/frame/treasury/Cargo.toml index 10226f3572bb1079e9100cc9a57a26d032f80717..99071e927635e0948bc47a065589e142396eab7e 100644 --- a/frame/treasury/Cargo.toml +++ b/frame/treasury/Cargo.toml @@ -1,22 +1,25 @@ [package] name = "pallet-treasury" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet to manage treasury" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -pallet-balances = { version = "2.0.0", default-features = false, path = "../balances" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +pallet-balances = { version = "2.0.0-alpha.2", default-features = false, path = "../balances" } [dev-dependencies] -sp-io ={ version = "2.0.0", path = "../../primitives/io" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } +sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } [features] default = ["std"] diff --git a/frame/treasury/src/lib.rs 
b/frame/treasury/src/lib.rs index 192dd1aff61163d5ab3ba9074f95c9a378b5a507..bbf31cc599da0ec63ce8f1fd77fcd917bc223d80 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -760,7 +760,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; diff --git a/frame/utility/Cargo.toml b/frame/utility/Cargo.toml index 6d1187b0eeafbce02a510b7edc03942a3b1149ea..46f59a191ae9f9122b209ce15b20825d324f248f 100644 --- a/frame/utility/Cargo.toml +++ b/frame/utility/Cargo.toml @@ -1,23 +1,26 @@ [package] name = "pallet-utility" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME utilities pallet" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } -sp-core = { version = "2.0.0", default-features = false, path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } [features] default = ["std"] diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index da099fcb4ad008476d02f9695fb958b76de3d095..0b60532c3dd23c35d09ff09717eb79d3217ef212 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -258,7 +258,7 @@ decl_module! { /// - The weight of the `call` + 10,000. /// # #[weight = FunctionOf( - |args: (&u16, &Box<::Call>)| args.1.get_dispatch_info().weight + 10_000, + |args: (&u16, &Box<::Call>)| args.1.get_dispatch_info().weight + 10_000, |args: (&u16, &Box<::Call>)| args.1.get_dispatch_info().class, true )] @@ -410,7 +410,7 @@ decl_module! { #[weight = FunctionOf( |args: (&u16, &Vec, &Option>, &[u8; 32])| { 10_000 * (args.1.len() as u32 + 1) - }, + }, DispatchClass::Normal, true )] @@ -485,7 +485,7 @@ decl_module! 
{ #[weight = FunctionOf( |args: (&u16, &Vec, &Timepoint, &[u8; 32])| { 10_000 * (args.1.len() as u32 + 1) - }, + }, DispatchClass::Normal, true )] @@ -538,7 +538,7 @@ impl Module { pub fn timepoint() -> Timepoint { Timepoint { height: >::block_number(), - index: >::extrinsic_count(), + index: >::extrinsic_index().unwrap_or_default(), } } @@ -624,7 +624,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } parameter_types! { pub const ExistentialDeposit: u64 = 1; diff --git a/frame/vesting/Cargo.toml b/frame/vesting/Cargo.toml index 6923297af5616be2dffcb1bad90b775480056397..f01a0f6bf22b93752110f851ac0d7fd0bab796a5 100644 --- a/frame/vesting/Cargo.toml +++ b/frame/vesting/Cargo.toml @@ -1,23 +1,27 @@ [package] name = "pallet-vesting" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" +license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME pallet for manage vesting" [dependencies] serde = { version = "1.0.101", optional = true } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } enumflags2 = { version = "0.6.2" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -frame-support = { version = "2.0.0", default-features = false, path = "../support" } -frame-system = { version = "2.0.0", default-features = false, path = "../system" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../support" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../system" } [dev-dependencies] -sp-core = { version = "2.0.0", path = "../../primitives/core" } -pallet-balances = { version = "2.0.0", path = "../balances" } -sp-storage = { version = "2.0.0", path = "../../primitives/storage" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +pallet-balances = { version = "2.0.0-alpha.2", path = "../balances" } +sp-storage = { version = "2.0.0-alpha.2", path = "../../primitives/storage" } hex-literal = "0.2.1" [features] diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 15adcaaf53bb707bbfca0e9cade45fb95940fd20..02d5bebfd2cbde83e17585c36762de9ec1510c08 100644 --- a/frame/vesting/src/lib.rs +++ b/frame/vesting/src/lib.rs @@ -52,10 +52,12 @@ use codec::{Encode, Decode}; use sp_runtime::{DispatchResult, RuntimeDebug, traits::{ StaticLookup, Zero, AtLeast32Bit, MaybeSerializeDeserialize, Convert }}; -use frame_support::{decl_module, decl_event, decl_storage, decl_error}; +use frame_support::{decl_module, decl_event, decl_storage, decl_error, ensure}; use frame_support::traits::{ - Currency, LockableCurrency, VestingSchedule, WithdrawReason, LockIdentifier + Currency, LockableCurrency, 
VestingSchedule, WithdrawReason, LockIdentifier, ExistenceRequirement, + Get, }; +use frame_support::weights::SimpleDispatchInfo; use frame_system::{self as system, ensure_signed}; type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; @@ -69,6 +71,9 @@ pub trait Trait: frame_system::Trait { /// Convert the block number into a balance. type BlockNumberToBalance: Convert>; + + /// The minimum amount transferred to call `vested_transfer`. + type MinVestedTransfer: Get>; } const VESTING_ID: LockIdentifier = *b"vesting "; @@ -158,6 +163,8 @@ decl_error! { NotVesting, /// An existing vesting schedule already exists for this account that cannot be clobbered. ExistingVestingSchedule, + /// Amount being transferred is too low to create a vesting schedule. + AmountLow, } } @@ -166,6 +173,9 @@ decl_module! { pub struct Module for enum Call where origin: T::Origin { type Error = Error; + /// The minimum amount to be transferred to create a new vesting schedule. + const MinVestedTransfer: BalanceOf = T::MinVestedTransfer::get(); + fn deposit_event() = default; /// Unlock any vested funds of the sender account. @@ -206,6 +216,40 @@ decl_module! { ensure_signed(origin)?; Self::update_lock(T::Lookup::lookup(target)?) } + + /// Create a vested transfer. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `target`: The account that should be transferred the vested funds. + /// - `amount`: The amount of funds to transfer and will be vested. + /// - `schedule`: The vesting schedule attached to the transfer. + /// + /// Emits `VestingCreated`. + /// + /// # + /// - Creates a new storage entry, but is protected by a minimum transfer + /// amount needed to succeed. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(1_000_000)] + pub fn vested_transfer( + origin, + target: ::Source, + schedule: VestingInfo, T::BlockNumber>, + ) -> DispatchResult { + let transactor = ensure_signed(origin)?; + ensure!(schedule.locked >= T::MinVestedTransfer::get(), Error::::AmountLow); + + let who = T::Lookup::lookup(target)?; + ensure!(!Vesting::::contains_key(&who), Error::::ExistingVestingSchedule); + + T::Currency::transfer(&transactor, &who, schedule.locked, ExistenceRequirement::AllowDeath)?; + + Self::add_vesting_schedule(&who, schedule.locked, schedule.per_block, schedule.starting_block) + .expect("user does not have an existing vesting schedule; q.e.d."); + + Ok(()) + } } } @@ -237,13 +281,13 @@ impl VestingSchedule for Module where type Currency = T::Currency; /// Get the amount that is currently being vested and cannot be transferred out of this account. 
- fn vesting_balance(who: &T::AccountId) -> BalanceOf { + fn vesting_balance(who: &T::AccountId) -> Option> { if let Some(v) = Self::vesting(who) { let now = >::block_number(); let locked_now = v.locked_at::(now); - T::Currency::free_balance(who).min(locked_now) + Some(T::Currency::free_balance(who).min(locked_now)) } else { - Zero::zero() + None } } @@ -301,7 +345,7 @@ mod tests { use sp_runtime::{ Perbill, testing::Header, - traits::{BlakeTwo256, IdentityLookup, Identity, OnInitialize}, + traits::{BlakeTwo256, IdentityLookup, Identity, OnRuntimeUpgrade}, }; use sp_storage::Storage; @@ -339,7 +383,7 @@ mod tests { type ModuleToIndex = (); type AccountData = pallet_balances::AccountData; type OnNewAccount = (); - type OnReapAccount = Balances; + type OnKilledAccount = (); } impl pallet_balances::Trait for Test { type Balance = u64; @@ -348,10 +392,14 @@ mod tests { type ExistentialDeposit = ExistentialDeposit; type AccountStore = System; } + parameter_types! { + pub const MinVestedTransfer: u64 = 256 * 2; + } impl Trait for Test { type Event = (); type Currency = Balances; type BlockNumberToBalance = Identity; + type MinVestedTransfer = MinVestedTransfer; } type System = frame_system::Module; type Balances = pallet_balances::Module; @@ -436,7 +484,7 @@ mod tests { ]; s.top = data.into_iter().collect(); sp_io::TestExternalities::new(s).execute_with(|| { - Balances::on_initialize(1); + Balances::on_runtime_upgrade(); assert_eq!(Balances::free_balance(6), 60); assert_eq!(Balances::usable_balance(&6), 30); System::set_block_number(2); @@ -484,28 +532,28 @@ mod tests { assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); // Account 12 has a vesting schedule // Account 1 has only 128 units vested from their illiquid 256 * 5 units at block 1 - assert_eq!(Vesting::vesting_balance(&1), 128 * 9); + assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); // Account 2 has their full balance locked - assert_eq!(Vesting::vesting_balance(&2), user2_free_balance); + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); // Account 12 has only their illiquid funds locked - assert_eq!(Vesting::vesting_balance(&12), user12_free_balance - 256 * 5); + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); System::set_block_number(10); assert_eq!(System::block_number(), 10); // Account 1 has fully vested by block 10 - assert_eq!(Vesting::vesting_balance(&1), 0); + assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 2 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&2), user2_free_balance); + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); // Account 12 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&12), user12_free_balance - 256 * 5); + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); System::set_block_number(30); assert_eq!(System::block_number(), 30); - assert_eq!(Vesting::vesting_balance(&1), 0); // Account 1 is still fully vested, and not negative - assert_eq!(Vesting::vesting_balance(&2), 0); // Account 2 has fully vested by block 30 - assert_eq!(Vesting::vesting_balance(&12), 0); // Account 2 has fully vested by block 30 + assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative + assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 + assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 }); } @@ -520,7 +568,7 @@ mod tests { 
let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), 45); + assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_noop!( Balances::transfer(Some(1).into(), 2, 56), pallet_balances::Error::::LiquidityRestrictions, @@ -538,7 +586,7 @@ mod tests { let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), 45); + assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest(Some(1).into())); assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); }); @@ -554,7 +602,7 @@ mod tests { let user1_free_balance = Balances::free_balance(&1); assert_eq!(user1_free_balance, 100); // Account 1 has free balance // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), 45); + assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest_other(Some(2).into(), 1)); assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); }); @@ -577,12 +625,12 @@ mod tests { assert_eq!(user2_free_balance, 300); // Account 2 has 100 more free balance than normal // Account 1 has only 5 units vested at block 1 (plus 150 unvested) - assert_eq!(Vesting::vesting_balance(&1), 45); + assert_eq!(Vesting::vesting_balance(&1), Some(45)); assert_ok!(Vesting::vest(Some(1).into())); assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained // Account 2 has no units vested at block 1, but gained 100 - assert_eq!(Vesting::vesting_balance(&2), 200); + assert_eq!(Vesting::vesting_balance(&2), Some(200)); assert_ok!(Vesting::vest(Some(2).into())); assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained }); @@ -599,7 +647,7 @@ mod tests { assert_eq!(user12_free_balance, 2560); // Account 12 has free balance // Account 12 has liquid funds - assert_eq!(Vesting::vesting_balance(&12), user12_free_balance - 256 * 5); + assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); // Account 12 has delayed vesting let user12_vesting_schedule = VestingInfo { @@ -613,4 +661,95 @@ mod tests { assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); }); } + + #[test] + fn vested_transfer_works() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + assert_eq!(System::block_number(), 1); + let user3_free_balance = Balances::free_balance(&3); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user3_free_balance, 256 * 30); + assert_eq!(user4_free_balance, 256 * 40); + // Account 4 should not have any vesting yet. + assert_eq!(Vesting::vesting(&4), None); + // Make the schedule for the new transfer. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); + // Now account 4 should have vesting. + assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); + // Ensure the transfer happened correctly. 
+ let user3_free_balance_updated = Balances::free_balance(&3); + assert_eq!(user3_free_balance_updated, 256 * 25); + let user4_free_balance_updated = Balances::free_balance(&4); + assert_eq!(user4_free_balance_updated, 256 * 45); + // Account 4 has 5 * 256 locked. + assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); + + System::set_block_number(20); + assert_eq!(System::block_number(), 20); + + // Account 4 has 5 * 64 units vested by block 20. + assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + // Account 4 has fully vested. + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + }); + } + + #[test] + fn vested_transfer_correctly_fails() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + assert_eq!(System::block_number(), 1); + let user2_free_balance = Balances::free_balance(&2); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + // Account 2 should already have a vesting schedule. + let user2_vesting_schedule = VestingInfo { + locked: 256 * 20, + per_block: 256, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); + + // The vesting schedule we will try to create, fails due to pre-existence of schedule. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_noop!( + Vesting::vested_transfer(Some(4).into(), 2, new_vesting_schedule), + Error::::ExistingVestingSchedule, + ); + + // Fails due to too low transfer amount. + let new_vesting_schedule_too_low = VestingInfo { + locked: 256 * 1, + per_block: 64, + starting_block: 10, + }; + assert_noop!( + Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low), + Error::::AmountLow, + ); + + // Verify no currency transfer happened. + assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + }); + } } diff --git a/primitives/allocator/Cargo.toml b/primitives/allocator/Cargo.toml index 3cfc9a4de618a122969cdb88b4f56546ccc3a4a3..944cfa6de19d57c8f9a010b5b1ab831f69adf31f 100644 --- a/primitives/allocator/Cargo.toml +++ b/primitives/allocator/Cargo.toml @@ -1,14 +1,18 @@ [package] name = "sp-allocator" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Collection of allocator implementations." 
+documentation = "https://docs.rs/sp-allocator" [dependencies] -sp-std = { version = "2.0.0", path = "../std", default-features = false } -sp-core = { version = "2.0.0", path = "../core", default-features = false } -sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } +sp-std = { version = "2.0.0-alpha.2", path = "../std", default-features = false } +sp-core = { version = "2.0.0-alpha.2", path = "../core", default-features = false } +sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../wasm-interface", default-features = false } log = { version = "0.4.8", optional = true } derive_more = { version = "0.99.2", optional = true } diff --git a/primitives/api/Cargo.toml b/primitives/api/Cargo.toml index f6966f94622db6e4ab51cfa54e58fbd32e65ed87..a33ff0fa0d75453077cec6650c8c62af54022e91 100644 --- a/primitives/api/Cargo.toml +++ b/primitives/api/Cargo.toml @@ -1,22 +1,25 @@ [package] name = "sp-api" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate runtime api primitives" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -sp-api-proc-macro = { version = "2.0.0", path = "proc-macro" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-version = { version = "2.0.0", default-features = false, path = "../version" } -sp-state-machine = { version = "0.8", optional = true, path = "../../primitives/state-machine" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-api-proc-macro = { version = "2.0.0-alpha.2", path = "proc-macro" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } +sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../version" } +sp-state-machine = { version = "0.8.0-alpha.2", optional = true, path = "../../primitives/state-machine" } hash-db = { version = "0.15.2", optional = true } [dev-dependencies] -sp-test-primitives = { version = "2.0.0", path = "../test-primitives" } +sp-test-primitives = { version = "2.0.0-dev", path = "../test-primitives" } [features] default = [ "std" ] diff --git a/primitives/api/proc-macro/Cargo.toml b/primitives/api/proc-macro/Cargo.toml index 66c2c5dba82d02999b3ca4e938a6961500a787b3..940e21759642fdd12fc6339a34a2e67410f956ac 100644 --- a/primitives/api/proc-macro/Cargo.toml +++ b/primitives/api/proc-macro/Cargo.toml @@ -1,15 +1,20 @@ [package] name = "sp-api-proc-macro" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Macros for declaring and implementing runtime apis." 
+documentation = "https://docs.rs/sp-api-proc-macro" + [lib] proc-macro = true [dependencies] -quote = "1.0.2" +quote = "1.0.3" syn = { version = "1.0.8", features = ["full", "fold", "extra-traits", "visit"] } proc-macro2 = "1.0.6" blake2-rfc = { version = "0.2.18", default-features = false } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 770a843bfa6c1e669c05045fcdbdda9e6c7c2c7b..e16cf3b5c46f78f50fbc03f5bff0f699dbedf503 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -27,7 +27,7 @@ use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, Path, Signature, + spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, Path, Signature, Attribute, ImplItem, parse::{Parse, ParseStream, Result, Error}, PathArguments, GenericArgument, TypePath, fold::{self, Fold}, parse_quote, }; @@ -141,7 +141,7 @@ fn extract_runtime_block_ident(trait_: &Path) -> Result<&TypePath> { fn generate_impl_calls( impls: &[ItemImpl], input: &Ident -) -> Result> { +) -> Result)>> { let mut impl_calls = Vec::new(); for impl_ in impls { @@ -162,9 +162,12 @@ fn generate_impl_calls( &impl_trait )?; - impl_calls.push( - (impl_trait_ident.clone(), method.sig.ident.clone(), impl_call) - ); + impl_calls.push(( + impl_trait_ident.clone(), + method.sig.ident.clone(), + impl_call, + filter_cfg_attrs(&impl_.attrs), + )); } } } @@ -178,9 +181,12 @@ fn generate_dispatch_function(impls: &[ItemImpl]) -> Result { let c = generate_crate_access(HIDDEN_INCLUDES_ID); let impl_calls = generate_impl_calls(impls, &data)? .into_iter() - .map(|(trait_, fn_name, impl_)| { + .map(|(trait_, fn_name, impl_, attrs)| { let name = prefix_function_with_trait(&trait_, &fn_name); - quote!( #name => Some(#c::Encode::encode(&{ #impl_ })), ) + quote!( + #( #attrs )* + #name => Some(#c::Encode::encode(&{ #impl_ })), + ) }); Ok(quote!( @@ -200,13 +206,14 @@ fn generate_wasm_interface(impls: &[ItemImpl]) -> Result { let c = generate_crate_access(HIDDEN_INCLUDES_ID); let impl_calls = generate_impl_calls(impls, &input)? 
.into_iter() - .map(|(trait_, fn_name, impl_)| { + .map(|(trait_, fn_name, impl_, attrs)| { let fn_name = Ident::new( &prefix_function_with_trait(&trait_, &fn_name), Span::call_site() ); quote!( + #( #attrs )* #[cfg(not(feature = "std"))] #[no_mangle] pub fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { @@ -237,7 +244,7 @@ fn generate_runtime_api_base_structures() -> Result { pub struct RuntimeApiImpl + 'static> where // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HasherFor>, + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, { call: &'static C, commit_on_success: std::cell::RefCell, @@ -257,7 +264,7 @@ fn generate_runtime_api_base_structures() -> Result { for RuntimeApiImpl where // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HasherFor>, + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, {} #[cfg(any(feature = "std", test))] @@ -265,7 +272,7 @@ fn generate_runtime_api_base_structures() -> Result { for RuntimeApiImpl where // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HasherFor>, + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, {} #[cfg(any(feature = "std", test))] @@ -273,7 +280,7 @@ fn generate_runtime_api_base_structures() -> Result { for RuntimeApiImpl where // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HasherFor>, + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, { type Error = C::Error; } @@ -283,7 +290,7 @@ fn generate_runtime_api_base_structures() -> Result { RuntimeApiImpl where // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HasherFor>, + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, { type StateBackend = C::StateBackend; @@ -327,7 +334,7 @@ fn generate_runtime_api_base_structures() -> Result { &self, backend: &Self::StateBackend, changes_trie_state: Option<&#crate_::ChangesTrieState< - #crate_::HasherFor, + #crate_::HashFor, #crate_::NumberFor, >>, parent_hash: Block::Hash, @@ -351,7 +358,7 @@ fn generate_runtime_api_base_structures() -> Result { where C: #crate_::CallApiAt + 'static, // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HasherFor>, + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, { type RuntimeApi = RuntimeApiImpl; @@ -373,7 +380,7 @@ fn generate_runtime_api_base_structures() -> Result { impl> RuntimeApiImpl where // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HasherFor>, + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, { fn call_api_at< R: #crate_::Encode + #crate_::Decode + PartialEq, @@ -447,6 +454,7 @@ fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { let trait_ = extend_with_runtime_decl_path(trait_); impl_.trait_.as_mut().unwrap().1 = trait_; + impl_.attrs = filter_cfg_attrs(&impl_.attrs); impls_prepared.push(impl_); } @@ -603,7 +611,7 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { where_clause.predicates.push( parse_quote! 
{ RuntimeApiImplCall::StateBackend: - #crate_::StateBackend<#crate_::HasherFor<__SR_API_BLOCK__>> + #crate_::StateBackend<#crate_::HashFor<__SR_API_BLOCK__>> } ); @@ -622,6 +630,8 @@ impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { } ); + input.attrs = filter_cfg_attrs(&input.attrs); + // The implementation for the `RuntimeApiImpl` is only required when compiling with // the feature `std` or `test`. input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); @@ -695,8 +705,12 @@ fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { let id: Path = parse_quote!( #path ID ); let version: Path = parse_quote!( #path VERSION ); + let attrs = filter_cfg_attrs(&impl_.attrs); - result.push(quote!( (#id, #version) )); + result.push(quote!( + #( #attrs )* + (#id, #version) + )); } let c = generate_crate_access(HIDDEN_INCLUDES_ID); @@ -745,3 +759,32 @@ fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { ) ) } + +// Filters all attributes except the cfg ones. +fn filter_cfg_attrs(attrs: &[Attribute]) -> Vec { + attrs.into_iter().filter(|a| a.path.is_ident("cfg")).cloned().collect() +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn filter_non_cfg_attributes() { + let cfg_std: Attribute = parse_quote!(#[cfg(feature = "std")]); + let cfg_benchmarks: Attribute = parse_quote!(#[cfg(feature = "runtime-benchmarks")]); + + let attrs = vec![ + cfg_std.clone(), + parse_quote!(#[derive(Debug)]), + parse_quote!(#[test]), + cfg_benchmarks.clone(), + parse_quote!(#[allow(non_camel_case_types)]), + ]; + + let filtered = filter_cfg_attrs(&attrs); + assert_eq!(filtered.len(), 2); + assert_eq!(cfg_std, filtered[0]); + assert_eq!(cfg_benchmarks, filtered[1]); + } +} diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index bde00d48172e8e4cf9ec855e7cf271b0e36ac289..0901be5831dfd9758bdc59b466302963ad8f0fb8 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -50,7 +50,7 @@ pub use sp_core::to_substrate_wasm_fn_return_value; #[doc(hidden)] pub use sp_runtime::{ traits::{ - Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, HasherFor, NumberFor, + Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, HashFor, NumberFor, Header as HeaderT, Hash as HashT, }, generic::BlockId, transaction_validity::TransactionValidity, @@ -228,22 +228,20 @@ pub use sp_api_proc_macro::impl_runtime_apis; /// A type that records all accessed trie nodes and generates a proof out of it. #[cfg(feature = "std")] -pub type ProofRecorder = sp_state_machine::ProofRecorder< - <<::Header as HeaderT>::Hashing as HashT>::Hasher ->; +pub type ProofRecorder = sp_state_machine::ProofRecorder>; /// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] pub type StorageTransactionCache = sp_state_machine::StorageTransactionCache< - >>::Transaction, HasherFor, NumberFor + >>::Transaction, HashFor, NumberFor >; #[cfg(feature = "std")] pub type StorageChanges = sp_state_machine::StorageChanges< - >>::Transaction, - HasherFor, + >>::Transaction, + HashFor, NumberFor >; @@ -255,7 +253,7 @@ pub type StateBackendFor = /// Extract the state backend transaction type for a type that implements `ProvideRuntimeApi`. #[cfg(feature = "std")] pub type TransactionFor = - as StateBackend>>::Transaction; + as StateBackend>>::Transaction; /// Something that can be constructed to a runtime api. 
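The mechanical rename from `HasherFor` to `HashFor` that runs through the generated impl code and the type aliases above boils down to this: `HashFor<Block>` names the header's hashing type directly, so the old `as HashT>::Hasher` projection is no longer needed. A minimal sketch, assuming a toy block assembled from `sp_runtime::generic` (these concrete types are illustrative, not taken from this patch):

    use sp_runtime::{generic, traits::{BlakeTwo256, HashFor}, OpaqueExtrinsic};

    type Header = generic::Header<u64, BlakeTwo256>;
    type Block = generic::Block<Header, OpaqueExtrinsic>;

    // Compile-time witness that `HashFor<Block>` resolves to the header's hashing
    // type (BlakeTwo256 here); this is the type the `StateBackend<HashFor<Block>>`
    // bounds above refer to.
    fn same_type(h: HashFor<Block>) -> BlakeTwo256 { h }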
#[cfg(feature = "std")] @@ -279,7 +277,7 @@ pub trait ApiErrorExt { #[cfg(feature = "std")] pub trait ApiExt: ApiErrorExt { /// The state backend that is used to store the block states. - type StateBackend: StateBackend>; + type StateBackend: StateBackend>; /// The given closure will be called with api instance. Inside the closure any api call is /// allowed. After doing the api call, the closure is allowed to map the `Result` to a @@ -328,7 +326,7 @@ pub trait ApiExt: ApiErrorExt { fn into_storage_changes( &self, backend: &Self::StateBackend, - changes_trie_state: Option<&ChangesTrieState, NumberFor>>, + changes_trie_state: Option<&ChangesTrieState, NumberFor>>, parent_hash: Block::Hash, ) -> Result, String> where Self: Sized; } @@ -355,7 +353,7 @@ pub enum InitializeBlock<'a, Block: BlockT> { /// Parameters for [`CallApiAt::call_api_at`]. #[cfg(feature = "std")] -pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend>> { +pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend>> { /// A reference to something that implements the [`Core`] api. pub core_api: &'a C, /// The block id that determines the state that should be setup when calling the function. @@ -389,7 +387,7 @@ pub trait CallApiAt { type Error: std::fmt::Debug + From; /// The state backend that is used to store the block states. - type StateBackend: StateBackend>; + type StateBackend: StateBackend>; /// Calls the given api function with the given encoded arguments at the given block and returns /// the encoded result. diff --git a/primitives/api/test/Cargo.toml b/primitives/api/test/Cargo.toml index c193c9dc5ef3cc97331059c959734806b6d0918c..6d2207c178a22db83f990123859168e69f6f8914 100644 --- a/primitives/api/test/Cargo.toml +++ b/primitives/api/test/Cargo.toml @@ -1,25 +1,29 @@ [package] name = "sp-api-test" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-api = { version = "2.0.0", path = "../" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sp-version = { version = "2.0.0", path = "../../version" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-blockchain = { version = "2.0.0", path = "../../blockchain" } -sp-consensus = { version = "0.8", path = "../../../primitives/consensus/common" } -codec = { package = "parity-scale-codec", version = "1.0.0" } -sp-state-machine = { version = "0.8", path = "../../../primitives/state-machine" } +sp-api = { version = "2.0.0-alpha.2", path = "../" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } +sp-version = { version = "2.0.0-alpha.2", path = "../../version" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../blockchain" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../../primitives/consensus/common" } +sc-block-builder = { version = "0.8.0-alpha.2", path = "../../../client/block-builder" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } trybuild = "1.0.17" rustversion = "1.0.0" [dev-dependencies] criterion = "0.3.0" -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } 
+substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } [[bench]] name = "bench" diff --git a/primitives/api/test/tests/runtime_calls.rs b/primitives/api/test/tests/runtime_calls.rs index 66412edae026d1141240d29b5e85c6a19e1aea55..18beaad9170af9624b1f21cf16eb63b9062278bd 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -18,9 +18,9 @@ use sp_api::ProvideRuntimeApi; use substrate_test_runtime_client::{ prelude::*, DefaultTestClientBuilderExt, TestClientBuilder, - runtime::{TestAPI, DecodeFails, Transfer, Header}, + runtime::{TestAPI, DecodeFails, Transfer, Block}, }; -use sp_runtime::{generic::BlockId, traits::{Header as HeaderT, Hash as HashT}}; +use sp_runtime::{generic::BlockId, traits::{Header as HeaderT, HashFor}}; use sp_state_machine::{ ExecutionStrategy, create_proof_check_backend, execution_proof_check_on_trie_backend, @@ -28,6 +28,7 @@ use sp_state_machine::{ use sp_consensus::SelectChain; use codec::Encode; +use sc_block_builder::BlockBuilderProvider; fn calling_function_with_strat(strat: ExecutionStrategy) { let client = TestClientBuilder::new().set_execution_strategy(strat).build(); @@ -177,7 +178,7 @@ fn record_proof_works() { builder.push(transaction.clone()).unwrap(); let (block, _, proof) = builder.build().expect("Bake block").into_inner(); - let backend = create_proof_check_backend::<<
<Header as HeaderT>
::Hashing as HashT>::Hasher>( + let backend = create_proof_check_backend::>( storage_root, proof.expect("Proof was generated"), ).expect("Creates proof backend."); diff --git a/primitives/application-crypto/Cargo.toml b/primitives/application-crypto/Cargo.toml index 1fa9a6631c4efd969e7bcfada6c29bbcfc0e19df..ee710b29885a3fbe80202eb06e5799d875614cda 100644 --- a/primitives/application-crypto/Cargo.toml +++ b/primitives/application-crypto/Cargo.toml @@ -1,17 +1,21 @@ [package] name = "sp-application-crypto" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" description = "Provides facilities for generating application specific crypto wrapper types." license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sp-application-crypto" + [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } [features] default = [ "std" ] @@ -21,7 +25,7 @@ std = [ "full_crypto", "sp-core/std", "codec/std", "serde", "sp-std/std", "sp-io # or Intel SGX. # For the regular wasm runtime builds this should not be used. full_crypto = [ - "sp-core/full_crypto", + "sp-core/full_crypto", # Don't add `panic_handler` and `alloc_error_handler` since they are expected to be provided # by the user anyway. 
"sp-io/disable_panic_handler", diff --git a/primitives/application-crypto/test/Cargo.toml b/primitives/application-crypto/test/Cargo.toml index de4412db2d7cb4b011a668f647b900125347bfd7..0ed5355ce2d1e527303aa2badb0f9a1f1c544200 100644 --- a/primitives/application-crypto/test/Cargo.toml +++ b/primitives/application-crypto/test/Cargo.toml @@ -1,15 +1,17 @@ [package] name = "sp-application-crypto-test" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" description = "Integration tests for application-crypto" license = "GPL-3.0" publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } -substrate-test-runtime-client = { version = "2.0.0", path = "../../../test-utils/runtime/client" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -sp-api = { version = "2.0.0", path = "../../api" } -sp-application-crypto = { version = "2.0.0", path = "../" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../core" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../test-utils/runtime/client" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime" } +sp-api = { version = "2.0.0-alpha.2", path = "../../api" } +sp-application-crypto = { version = "2.0.0-alpha.2", path = "../" } diff --git a/primitives/arithmetic/Cargo.toml b/primitives/arithmetic/Cargo.toml index 995e36d5c905479e2eec506d134d35f25145f8c3..617e8b8e5ceb86866e51eed4bfa036bcae7d2c61 100644 --- a/primitives/arithmetic/Cargo.toml +++ b/primitives/arithmetic/Cargo.toml @@ -1,17 +1,22 @@ [package] name = "sp-arithmetic" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Minimal fixed point arithmetic primitives and types for runtime." 
+documentation = "https://docs.rs/sp-arithmetic" + [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } integer-sqrt = "0.1.2" num-traits = { version = "0.2.8", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-debug-derive = { version = "2.0.0", default-features = false, path = "../../primitives/debug-derive" } +sp-debug-derive = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/debug-derive" } [dev-dependencies] primitive-types = "0.6.2" diff --git a/primitives/authority-discovery/Cargo.toml b/primitives/authority-discovery/Cargo.toml index 44a7cad15559531edb91f5853b57d1a6c9b562b3..b4197c56d7e5ff912070b08af284a9693e74a73e 100644 --- a/primitives/authority-discovery/Cargo.toml +++ b/primitives/authority-discovery/Cargo.toml @@ -1,17 +1,19 @@ [package] name = "sp-authority-discovery" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] description = "Authority discovery primitives" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", default-features = false, version = "1.0.3" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", default-features = false, version = "1.2.0" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/authorship/Cargo.toml b/primitives/authorship/Cargo.toml index 7dc5fcfc95f1e1a8ff95c98cf8f7d4d410209a21..c5215631cca28c413460b2e77107cd1c575e7122 100644 --- a/primitives/authorship/Cargo.toml +++ b/primitives/authorship/Cargo.toml @@ -1,16 +1,18 @@ [package] name = "sp-authorship" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] description = "Authorship primitives" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../inherents" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } +sp-std = { version = 
"2.0.0-alpha.2", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/primitives/block-builder/Cargo.toml b/primitives/block-builder/Cargo.toml index 1700209ec46c22444c69ee52697a5f9cb7b21ab8..b5cb592d55b4109264d57b99c349d901626b8706 100644 --- a/primitives/block-builder/Cargo.toml +++ b/primitives/block-builder/Cargo.toml @@ -1,16 +1,19 @@ [package] name = "sp-block-builder" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "The block builder runtime api." [dependencies] -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false } -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../inherents" } [features] default = [ "std" ] diff --git a/primitives/block-builder/src/lib.rs b/primitives/block-builder/src/lib.rs index e4e98e1f40cbf4979f6c04113e520fcb308ed96f..71c2a3ccb55f4d45f49104ce9df86eb988de34ab 100644 --- a/primitives/block-builder/src/lib.rs +++ b/primitives/block-builder/src/lib.rs @@ -22,37 +22,10 @@ use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult}; use sp_inherents::{InherentData, CheckInherentsResult}; -/// Definitions for supporting the older version of API: v3 -/// -/// These definitions are taken from the 2c58e30246a029b53d51e5b24c31974ac539ee8b git revision. -#[deprecated(note = "These definitions here are only for compatibility reasons")] -pub mod compatibility_v3 { - use sp_runtime::{DispatchOutcome, transaction_validity}; - use codec::{Encode, Decode}; - - #[derive(Eq, PartialEq, Clone, Copy, Decode, Encode, Debug)] - pub enum ApplyError { - NoPermission, - BadState, - Validity(transaction_validity::TransactionValidityError), - } - - // `ApplyOutcome` was renamed to `DispatchOutcome` with the layout preserved. - pub type ApplyResult = Result; -} - sp_api::decl_runtime_apis! { /// The `BlockBuilder` api trait that provides the required functionality for building a block. #[api_version(5)] pub trait BlockBuilder { - /// Compatibility version of `apply_extrinsic` for v3. - /// - /// Only the return type is changed. - #[changed_in(4)] - #[allow(deprecated)] - fn apply_extrinsic(extrinsic: ::Extrinsic) - -> self::compatibility_v3::ApplyResult; - /// Apply the given extrinsic. 
/// /// Returns an inclusion outcome which specifies if this extrinsic is included in diff --git a/primitives/blockchain/Cargo.toml b/primitives/blockchain/Cargo.toml index 8b93436f9a3dc116aed1f8f19c9bf7a4b83c666a..5fb8ce3574719cd3af0f0231b682c38110e763cc 100644 --- a/primitives/blockchain/Cargo.toml +++ b/primitives/blockchain/Cargo.toml @@ -1,17 +1,22 @@ [package] name = "sp-blockchain" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate blockchain traits and primitives." +documentation = "https://docs.rs/sp-blockchain" + [dependencies] log = "0.4.8" lru = "0.4.0" parking_lot = "0.10.0" derive_more = "0.99.2" -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-consensus = { version = "0.8", path = "../consensus/common" } -sp-runtime = { version = "2.0.0", path = "../runtime" } -sp-block-builder = { version = "2.0.0", path = "../block-builder" } -sp-state-machine = { version = "0.8", path = "../state-machine" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-consensus = { version = "0.8.0-alpha.2", path = "../consensus/common" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } +sp-block-builder = { version = "2.0.0-alpha.2", path = "../block-builder" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../state-machine" } diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index ece20d1cf8586d5500195333fd38a5c71a502b0f..a4ec9c29952ab986f2615bec87dee37c7de81fae 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -19,8 +19,6 @@ use std::{self, error, result}; use sp_state_machine; use sp_runtime::transaction_validity::TransactionValidityError; -#[allow(deprecated)] -use sp_block_builder::compatibility_v3; use sp_consensus; use derive_more::{Display, From}; use codec::Error as CodecError; @@ -150,17 +148,6 @@ impl<'a> From<&'a str> for Error { } } -#[allow(deprecated)] -impl From for ApplyExtrinsicFailed { - fn from(e: compatibility_v3::ApplyError) -> Self { - use self::compatibility_v3::ApplyError::*; - match e { - Validity(tx_validity) => Self::Validity(tx_validity), - e => Self::Msg(format!("Apply extrinsic failed: {:?}", e)), - } - } -} - impl Error { /// Chain a blockchain error. 
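With the deprecated `compatibility_v3` module and its `From<ApplyError>` conversion removed, `BlockBuilder::apply_extrinsic` is only exposed with its current return type. A minimal sketch of that shape (illustrative only; a real runtime provides this inside `impl_runtime_apis!`):

    use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult};

    // Outer `Err`: the extrinsic was invalid and not included at all.
    // Inner `Result`: the dispatch outcome of an included extrinsic.
    fn apply_extrinsic_shape<Block: BlockT>(_xt: Block::Extrinsic) -> ApplyExtrinsicResult {
        Ok(Ok(()))
    }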
pub fn from_blockchain(e: Box) -> Self { diff --git a/primitives/consensus/aura/Cargo.toml b/primitives/consensus/aura/Cargo.toml index 6e05bf4aac427699cfb25270f5b76a626c0f97ed..04d411939975f096b6b3d3536d7a6d992ddbe55b 100644 --- a/primitives/consensus/aura/Cargo.toml +++ b/primitives/consensus/aura/Cargo.toml @@ -1,19 +1,21 @@ [package] name = "sp-consensus-aura" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-api = { version = "2.0.0", default-features = false, path = "../../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../inherents" } -sp-timestamp = { version = "2.0.0", default-features = false, path = "../../timestamp" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../application-crypto" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../std" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../runtime" } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../inherents" } +sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../timestamp" } [features] default = ["std"] diff --git a/primitives/consensus/babe/Cargo.toml b/primitives/consensus/babe/Cargo.toml index 7f0277a720205c500a1369311fcb6e466906de1e..f910b73ed6e0045ce475b884eafc86a6c0389f86 100644 --- a/primitives/consensus/babe/Cargo.toml +++ b/primitives/consensus/babe/Cargo.toml @@ -1,21 +1,23 @@ [package] name = "sp-consensus-babe" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Primitives for BABE consensus" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../application-crypto" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../std" } schnorrkel = { version = "0.8.5", features = ["preaudit_deprecated"], optional = true } -sp-api = { version = "2.0.0", default-features = false, path = "../../api" } -sp-consensus = { version = "0.8", optional = true, path = "../common" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../inherents" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } -sp-timestamp = { 
version = "2.0.0", default-features = false, path = "../../timestamp" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } +sp-consensus = { version = "0.8.0-alpha.2", optional = true, path = "../common" } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../inherents" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../runtime" } +sp-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../timestamp" } [features] default = ["std"] diff --git a/primitives/consensus/common/Cargo.toml b/primitives/consensus/common/Cargo.toml index d7c33a6452c3bc94863e01d523a18494efee29c9..e8d3c0c6118d0c4296a0f93aebdcc6640537ca21 100644 --- a/primitives/consensus/common/Cargo.toml +++ b/primitives/consensus/common/Cargo.toml @@ -1,30 +1,34 @@ [package] name = "sp-consensus" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] -description = "Common utilities for substrate consensus" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Common utilities for building and using consensus engines in substrate." +documentation = "https://docs.rs/sp-consensus/" + [dependencies] derive_more = "0.99.2" -libp2p = { version = "0.16.0", default-features = false } +libp2p = { version = "0.16.2", default-features = false } log = "0.4.8" -sp-core = { path= "../../core" } -sp-inherents = { version = "2.0.0", path = "../../inherents" } -sp-state-machine = { version = "0.8.0", path = "../../../primitives/state-machine" } +sp-core = { path= "../../core" , version = "2.0.0-alpha.2"} +sp-inherents = { version = "2.0.0-alpha.2", path = "../../inherents" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } futures = { version = "0.3.1", features = ["thread-pool"] } futures-timer = "3.0.1" futures-diagnose = "1.0" -sp-std = { version = "2.0.0", path = "../../std" } -sp-version = { version = "2.0.0", path = "../../version" } -sp-runtime = { version = "2.0.0", path = "../../runtime" } -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", path = "../../std" } +sp-version = { version = "2.0.0-alpha.2", path = "../../version" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime" } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } parking_lot = "0.10.0" serde = { version = "1.0", features = ["derive"] } [dev-dependencies] -sp-test-primitives = { version = "2.0.0", path = "../../test-primitives" } +sp-test-primitives = { version = "2.0.0-dev", path = "../../test-primitives" } [features] default = [] diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index 3000477ded527836b32f7382522fa8faab499fd5..eb90ac9f1d4df6ceb0ecba42bca4b7911cce7de5 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -16,7 +16,7 @@ //! Block import helpers. 
-use sp_runtime::traits::{Block as BlockT, DigestItemFor, Header as HeaderT, NumberFor, HasherFor}; +use sp_runtime::traits::{Block as BlockT, DigestItemFor, Header as HeaderT, NumberFor, HashFor}; use sp_runtime::Justification; use serde::{Serialize, Deserialize}; use std::borrow::Cow; @@ -139,7 +139,7 @@ pub struct BlockImportParams { /// The changes to the storage to create the state for the block. If this is `Some(_)`, /// the block import will not need to re-execute the block for importing it. pub storage_changes: Option< - sp_state_machine::StorageChanges, NumberFor> + sp_state_machine::StorageChanges, NumberFor> >, /// Is this block finalized already? /// `true` implies instant finality. diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 4927faede06f759bb5321f2313631aab44ef4cd3..09dc031dc9ba08863ac4f53136792bea2aef7668 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -32,7 +32,7 @@ use std::sync::Arc; use std::time::Duration; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, DigestFor, NumberFor, HasherFor}, + generic::BlockId, traits::{Block as BlockT, DigestFor, NumberFor, HashFor}, }; use futures::prelude::*; pub use sp_inherents::InherentData; @@ -93,7 +93,7 @@ pub struct Proposal { /// Optional proof that was recorded while building the block. pub proof: Option, /// The storage changes while building this block. - pub storage_changes: sp_state_machine::StorageChanges, NumberFor>, + pub storage_changes: sp_state_machine::StorageChanges, NumberFor>, } /// Used as parameter to [`Proposer`] to tell the requirement on recording a proof. diff --git a/primitives/consensus/common/src/select_chain.rs b/primitives/consensus/common/src/select_chain.rs index d94511c11027acc4c93ee7dc9d54279bdc4a8a3b..fe0d3972043b91201adb330ecaaf16b3a93838ce 100644 --- a/primitives/consensus/common/src/select_chain.rs +++ b/primitives/consensus/common/src/select_chain.rs @@ -23,11 +23,11 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; /// specific chain build. /// /// The Strategy can be customized for the two use cases of authoring new blocks -/// upon the best chain or which fork to finalise. Unless implemented differently +/// upon the best chain or which fork to finalize. Unless implemented differently /// by default finalization methods fall back to use authoring, so as a minimum /// `_authoring`-functions must be implemented. /// -/// Any particular user must make explicit, however, whether they intend to finalise +/// Any particular user must make explicit, however, whether they intend to finalize /// or author through the using the right function call, as these might differ in /// some implementations. 
/// diff --git a/primitives/consensus/pow/Cargo.toml b/primitives/consensus/pow/Cargo.toml index 8e964e0c18456e01b884c96f7f005d45b5b328f9..4abead3258f81afdec7243f4c374d26281ac7544 100644 --- a/primitives/consensus/pow/Cargo.toml +++ b/primitives/consensus/pow/Cargo.toml @@ -1,17 +1,19 @@ [package] name = "sp-consensus-pow" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Primitives for Aura consensus" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../../api" } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../runtime" } -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../api" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../runtime" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../core" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } [features] default = ["std"] diff --git a/primitives/core/Cargo.toml b/primitives/core/Cargo.toml index 0d28fe139a924c090267e2a1a2839605a5152935..83ef683a39a79d42b3705ff67e525e5980315f05 100644 --- a/primitives/core/Cargo.toml +++ b/primitives/core/Cargo.toml @@ -1,13 +1,17 @@ [package] name = "sp-core" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Shareable Substrate types." 
+documentation = "https://docs.rs/sp-core" [dependencies] -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } rustc-hex = { version = "2.0.1", default-features = false } log = { version = "0.4.8", default-features = false } serde = { version = "1.0.101", optional = true, features = ["derive"] } @@ -26,9 +30,9 @@ num-traits = { version = "0.2.8", default-features = false } zeroize = { version = "1.0.0", default-features = false } lazy_static = { version = "1.4.0", default-features = false, optional = true } parking_lot = { version = "0.10.0", optional = true } -sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -sp-storage = { version = "2.0.0", default-features = false, path = "../storage" } +sp-debug-derive = { version = "2.0.0-alpha.2", path = "../debug-derive" } +sp-externalities = { version = "0.8.0-alpha.2", optional = true, path = "../externalities" } +sp-storage = { version = "2.0.0-alpha.2", default-features = false, path = "../storage" } libsecp256k1 = { version = "0.3.2", default-features = false, features = ["hmac"] } parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } @@ -41,10 +45,10 @@ sha2 = { version = "0.8.0", default-features = false, optional = true } hex = { version = "0.4", default-features = false, optional = true } twox-hash = { version = "1.5.0", default-features = false, optional = true } -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } +sp-runtime-interface = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime-interface" } [dev-dependencies] -sp-serializer = { version = "2.0.0", path = "../serializer" } +sp-serializer = { version = "2.0.0-alpha.2", path = "../serializer" } pretty_assertions = "0.6.1" hex-literal = "0.2.1" rand = "0.7.2" diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 3833c8a912cece644561116d4adfae87c2d1f007..574b8d417dadfca6506395aabda85d1786f273d0 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -436,16 +436,26 @@ ss58_address_format!( (0, "polkadot", "Polkadot Relay-chain, direct checksum, standard account (*25519).") KusamaAccountDirect => (2, "kusama", "Kusama Relay-chain, direct checksum, standard account (*25519).") - DothereumAccountDirect => - (20, "dothereum", "Dothereum Para-chain, direct checksum, standard account (*25519).") - KulupuAccountDirect => - (16, "kulupu", "Kulupu mainnet, direct checksum, standard account (*25519).") EdgewareAccountDirect => (7, "edgeware", "Edgeware mainnet, direct checksum, standard account (*25519).") + KaruraAccountDirect => + (8, "karura", "Acala Karura canary network, direct checksum, standard account (*25519).") + ReynoldsAccountDirect => + (9, "reynolds", "Laminar Reynolds canary network, direct checksum, standard account (*25519).") + AcalaAccountDirect => + (10, "acala", "Acala mainnet, direct checksum, standard account (*25519).") + LaminarAccountDirect => + (11, "laminar", "Laminar mainnet, direct checksum, standard account (*25519).") + KulupuAccountDirect => + (16, "kulupu", "Kulupu 
mainnet, direct checksum, standard account (*25519).") + DothereumAccountDirect => + (20, "dothereum", "Dothereum Para-chain, direct checksum, standard account (*25519).") CentrifugeAccountDirect => (36, "centrifuge", "Centrifuge Chain mainnet, direct checksum, standard account (*25519).") SubstraTeeAccountDirect => (44, "substratee", "Any SubstraTEE off-chain network private account, direct checksum, standard account (*25519).") + DarwiniaAccountDirect => + (18, "darwinia", "Darwinia Chain mainnet, direct checksum, standard account (*25519).") ); /// Set the default "version" (actually, this is a bit of a misnomer and the version byte is diff --git a/primitives/core/src/hasher.rs b/primitives/core/src/hasher.rs index 68fce90644b7058407506901d85e72c8a3495766..28da432da7142b1a8ada83946088a03eb90d54ee 100644 --- a/primitives/core/src/hasher.rs +++ b/primitives/core/src/hasher.rs @@ -16,27 +16,10 @@ //! Substrate Blake2b Hasher implementation -use hash_db::Hasher; -use hash256_std_hasher::Hash256StdHasher; -use crate::hash::H256; - pub mod blake2 { - use super::{Hasher, Hash256StdHasher, H256}; - #[cfg(feature = "std")] - use crate::hashing::blake2_256; - - #[cfg(not(feature = "std"))] - extern "C" { - fn ext_blake2_256(data: *const u8, len: u32, out: *mut u8); - } - #[cfg(not(feature = "std"))] - fn blake2_256(data: &[u8]) -> [u8; 32] { - let mut result: [u8; 32] = Default::default(); - unsafe { - ext_blake2_256(data.as_ptr(), data.len() as u32, result.as_mut_ptr()); - } - result - } + use hash_db::Hasher; + use hash256_std_hasher::Hash256StdHasher; + use crate::hash::H256; /// Concrete implementation of Hasher using Blake2b 256-bit hashes #[derive(Debug)] @@ -46,8 +29,9 @@ pub mod blake2 { type Out = H256; type StdHasher = Hash256StdHasher; const LENGTH: usize = 32; + fn hash(x: &[u8]) -> Self::Out { - blake2_256(x).into() + crate::hashing::blake2_256(x).into() } } } diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 01a96d8853afd9e3f81c7ce72eb29af85d801945..79721b9b7699177037745276fe7f9983bc61ed5a 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -61,6 +61,7 @@ pub mod ed25519; pub mod sr25519; pub mod ecdsa; pub mod hash; +#[cfg(feature = "std")] mod hasher; pub mod offchain; pub mod sandbox; @@ -77,8 +78,7 @@ pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; pub use crypto::{DeriveJunction, Pair, Public}; pub use hash_db::Hasher; -// Switch back to Blake after PoC-3 is out -// pub use self::hasher::blake::BlakeHasher; +#[cfg(feature = "std")] pub use self::hasher::blake2::Blake2Hasher; pub use sp_storage as storage; diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index 82438dd6f85193e5a373076b4a550bb5a22ea926..f4faee6b026eb70f069149993b9b197246230da5 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -72,6 +72,8 @@ pub struct OffchainState { pub persistent_storage: InMemOffchainStorage, /// Local storage pub local_storage: InMemOffchainStorage, + /// Current timestamp (unix millis) + pub timestamp: u64, } impl OffchainState { @@ -144,7 +146,7 @@ impl TestOffchainExt { impl offchain::Externalities for TestOffchainExt { fn is_validator(&self) -> bool { - unimplemented!("not needed in tests so far") + true } fn network_state(&self) -> Result { @@ -155,7 +157,7 @@ impl offchain::Externalities for TestOffchainExt { } fn timestamp(&mut self) -> Timestamp { - unimplemented!("not needed in tests so far") + 
Timestamp::from_unix_millis(self.0.read().timestamp) } fn sleep_until(&mut self, _deadline: Timestamp) { diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index bd02d39fb55a50e400fdd50a3666b9a1a7c28cec..e86a0234bf54818cd5000ab8468f176f855e910f 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -92,12 +92,11 @@ pub trait CodeExecutor: Sized + Send + Sync + CallInWasm + Clone + 'static { /// Call a given method in the runtime. Returns a tuple of the result (either the output data /// or an execution error) together with a `bool`, which is true if native execution was used. fn call< - E: Externalities, R: codec::Codec + PartialEq, NC: FnOnce() -> Result + UnwindSafe, >( &self, - ext: &mut E, + ext: &mut dyn Externalities, method: &str, data: &[u8], use_native: bool, diff --git a/primitives/debug-derive/Cargo.toml b/primitives/debug-derive/Cargo.toml index 9b12bfb2059e2b630308bedf011e67b98537193f..1ffa37308e234b1cfb2c9dd36ff8ebba0e2433ee 100644 --- a/primitives/debug-derive/Cargo.toml +++ b/primitives/debug-derive/Cargo.toml @@ -1,15 +1,19 @@ [package] name = "sp-debug-derive" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Macros to derive runtime debug implementation." +documentation = "https://docs.rs/sp-debug-derive" [lib] proc-macro = true [dependencies] -quote = "1.0.2" +quote = "1.0.3" syn = "1.0.7" proc-macro2 = "1.0" diff --git a/primitives/externalities/Cargo.toml b/primitives/externalities/Cargo.toml index 23889da4ea289db7d9b12e60ea01efaa8d638ed8..f462b7670e8299af967708cc25419266e70f2301 100644 --- a/primitives/externalities/Cargo.toml +++ b/primitives/externalities/Cargo.toml @@ -1,11 +1,15 @@ [package] name = "sp-externalities" -version = "0.8.0" +version = "0.8.0-alpha.3" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate externalities abstraction" +documentation = "https://docs.rs/sp-externalities" [dependencies] -sp-storage = { version = "2.0.0", path = "../storage" } -sp-std = { version = "2.0.0", path = "../std" } +sp-storage = { version = "2.0.0-alpha.2", path = "../storage" } +sp-std = { version = "2.0.0-alpha.2", path = "../std" } environmental = { version = "1.1.1" } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 431e449846c6efcf32ad5eead1d0f16111360e72..fa0f9e4454dd2d250b6d858a7b7f504ff03ae259 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -203,13 +203,21 @@ pub trait Externalities: ExtensionStore { /// Returns the SCALE encoded hash. fn storage_changes_root(&mut self, parent: &[u8]) -> Result>, ()>; - fn wipe(&mut self) { - unimplemented!() - } + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Wipes all changes from caches and the database. + /// + /// The state will be reset to genesis. + fn wipe(&mut self); - fn commit(&mut self) { - unimplemented!() - } + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! 
+ /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Commits all changes to the database and clears all caches. + fn commit(&mut self); } /// Extension for the [`Externalities`] trait. diff --git a/primitives/finality-grandpa/Cargo.toml b/primitives/finality-grandpa/Cargo.toml index 6356ec82e532a8c902a89319f434596e62abbbe7..d36f0e4b527207ddb716b48aef496b184ca5b05b 100644 --- a/primitives/finality-grandpa/Cargo.toml +++ b/primitives/finality-grandpa/Cargo.toml @@ -1,17 +1,22 @@ [package] name = "sp-finality-grandpa" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Primitives for GRANDPA integration, suitable for WASM compilation." +documentation = "https://docs.rs/sp-finality-grandpa" + [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/finality-tracker/Cargo.toml b/primitives/finality-tracker/Cargo.toml index 1b0e81da9175a361854488462a27e3967553d029..f89cb24d4a74cb3dc5871e1c4f64b4d6862d4dfe 100644 --- a/primitives/finality-tracker/Cargo.toml +++ b/primitives/finality-tracker/Cargo.toml @@ -1,14 +1,17 @@ [package] name = "sp-finality-tracker" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME module that tracks the last finalized block, as perceived by block authors." 
[dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } [features] default = ["std"] diff --git a/primitives/inherents/Cargo.toml b/primitives/inherents/Cargo.toml index 698ab5389a543f0ff8fdfa3f87f8fd168e483f24..839edba73de92a1ae2c13bf2fa10961be3c36d65 100644 --- a/primitives/inherents/Cargo.toml +++ b/primitives/inherents/Cargo.toml @@ -1,15 +1,20 @@ [package] name = "sp-inherents" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Provides types and traits for creating and checking inherents." +documentation = "https://docs.rs/sp-inherents" + [dependencies] parking_lot = { version = "0.10.0", optional = true } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } derive_more = { version = "0.99.2", optional = true } [features] diff --git a/primitives/io/Cargo.toml b/primitives/io/Cargo.toml index f494697b4b57242ab4eb303d38827991e8937724..4f740638024bcaf8974bfc6830f904d47de7271b 100644 --- a/primitives/io/Cargo.toml +++ b/primitives/io/Cargo.toml @@ -1,21 +1,26 @@ [package] name = "sp-io" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "I/O for Substrate runtimes" +documentation = "https://docs.rs/sp-io" + [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } hash-db = { version = "0.15.2", default-features = false } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } libsecp256k1 = { version = "0.3.4", optional = true } -sp-state-machine = { version = "0.8", optional = true, path = "../../primitives/state-machine" } -sp-wasm-interface = { version = "2.0.0", path = "../../primitives/wasm-interface", default-features = false } -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../runtime-interface" } -sp-trie = { version = "2.0.0", optional = true, path = "../../primitives/trie" } -sp-externalities = { version = 
"0.8.0", optional = true, path = "../externalities" } +sp-state-machine = { version = "0.8.0-alpha.2", optional = true, path = "../../primitives/state-machine" } +sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../../primitives/wasm-interface", default-features = false } +sp-runtime-interface = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime-interface" } +sp-trie = { version = "2.0.0-alpha.2", optional = true, path = "../../primitives/trie" } +sp-externalities = { version = "0.8.0-alpha.2", optional = true, path = "../externalities" } log = { version = "0.4.8", optional = true } [features] diff --git a/primitives/io/src/lib.rs b/primitives/io/src/lib.rs index 5d29fe5c9499b1880a66c4c233f39e88c6c2f317..4b520a240a923694090acfb74411e95911a35aa8 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -474,7 +474,10 @@ pub trait Crypto { } /// Verify and recover a SECP256k1 ECDSA signature. - /// - `sig` is passed in RSV format. V should be either 0/1 or 27/28. + /// + /// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`. + /// - `msg` is the blake2-256 hash of the message. + /// /// Returns `Err` if the signature is bad, otherwise the 64-byte pubkey /// (doesn't include the 0x04 prefix). fn secp256k1_ecdsa_recover( @@ -493,8 +496,11 @@ pub trait Crypto { } /// Verify and recover a SECP256k1 ECDSA signature. - /// - `sig` is passed in RSV format. V should be either 0/1 or 27/28. - /// - returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey. + /// + /// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`. + /// - `msg` is the blake2-256 hash of the message. + /// + /// Returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey. fn secp256k1_ecdsa_recover_compressed( sig: &[u8; 65], msg: &[u8; 32], diff --git a/primitives/keyring/Cargo.toml b/primitives/keyring/Cargo.toml index e2603a02623ab3d3a6e093a53ee83cc4df97a17b..1ec4ebe547f360a803dfa2b64e89427add103df0 100644 --- a/primitives/keyring/Cargo.toml +++ b/primitives/keyring/Cargo.toml @@ -1,12 +1,17 @@ [package] name = "sp-keyring" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Keyring support code for the runtime. A set of test accounts." 
+documentation = "https://docs.rs/sp-keyring" + [dependencies] -sp-core = { version = "2.0.0", path = "../core" } -sp-runtime = { version = "2.0.0", path = "../runtime" } +sp-core = { version = "2.0.0-alpha.2", path = "../core" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } lazy_static = "1.4.0" strum = { version = "0.16.0", features = ["derive"] } diff --git a/primitives/offchain/Cargo.toml b/primitives/offchain/Cargo.toml index ef6d0a6d2c7ee86783479b0082ffa112279875d9..45324d368b94428540b20202d1daf6dc8f20bcae 100644 --- a/primitives/offchain/Cargo.toml +++ b/primitives/offchain/Cargo.toml @@ -1,14 +1,16 @@ [package] description = "Substrate offchain workers primitives" name = "sp-offchain" -version = "2.0.0" +version = "2.0.0-alpha.3" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/panic-handler/Cargo.toml b/primitives/panic-handler/Cargo.toml index 0fb6e3b173e15d81d15311f6ae3a82b535e7fc5a..592107b84f06a4f6d789f27e152b2a7b2c393caf 100644 --- a/primitives/panic-handler/Cargo.toml +++ b/primitives/panic-handler/Cargo.toml @@ -1,10 +1,13 @@ [package] name = "sp-panic-handler" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] -description = "Substrate panic handler." edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Custom panic hook with bug report link" +documentation = "https://docs.rs/sp-panic-handler" [dependencies] backtrace = "0.3.38" diff --git a/primitives/phragmen/Cargo.toml b/primitives/phragmen/Cargo.toml index 3bfff32d2a481a86e1aca8b1261210006086ddc6..6a599bdabd830a746c17a595c2299dbbfcf7c339 100644 --- a/primitives/phragmen/Cargo.toml +++ b/primitives/phragmen/Cargo.toml @@ -1,18 +1,21 @@ [package] name = "sp-phragmen" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "PHRAGMENT primitives" [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } [dev-dependencies] -substrate-test-utils = { version = "2.0.0", path = "../../test-utils" } -sp-io ={ version = "2.0.0", path = "../../primitives/io" } +substrate-test-utils = { version = "2.0.0-alpha.2", path = "../../test-utils" } +sp-io ={ version = "2.0.0-alpha.2", path = "../../primitives/io" } rand = "0.7.2" [features] diff --git a/primitives/rpc/Cargo.toml b/primitives/rpc/Cargo.toml index 448f8b8c9cd4d714961d1c1f5b496e6cde7c3c13..13d91e71d34e47e51644b61fdac38d681b4beae4 100644 --- a/primitives/rpc/Cargo.toml +++ 
b/primitives/rpc/Cargo.toml @@ -1,13 +1,16 @@ [package] name = "sp-rpc" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate RPC primitives and utilities." [dependencies] serde = { version = "1.0.101", features = ["derive"] } -sp-core = { version = "2.0.0", path = "../core" } +sp-core = { version = "2.0.0-alpha.2", path = "../core" } [dev-dependencies] serde_json = "1.0.41" diff --git a/primitives/runtime-interface/Cargo.toml b/primitives/runtime-interface/Cargo.toml index cef3acdbfbbb3ce54a42738ea597fbf4a06ceb6a..70092c0587fe993e758e78dd74f1423ec204d803 100644 --- a/primitives/runtime-interface/Cargo.toml +++ b/primitives/runtime-interface/Cargo.toml @@ -1,26 +1,30 @@ [package] name = "sp-runtime-interface" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate runtime interface" +documentation = "https://docs.rs/sp-runtime-interface/" [dependencies] -sp-wasm-interface = { version = "2.0.0", path = "../wasm-interface", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime-interface-proc-macro = { version = "2.0.0", path = "proc-macro" } -sp-externalities = { version = "0.8.0", optional = true, path = "../externalities" } -codec = { package = "parity-scale-codec", version = "1.0.6", default-features = false } +sp-wasm-interface = { version = "2.0.0-alpha.2", path = "../wasm-interface", default-features = false } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-runtime-interface-proc-macro = { version = "2.0.0-alpha.2", path = "proc-macro" } +sp-externalities = { version = "0.8.0-alpha.2", optional = true, path = "../externalities" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } static_assertions = "1.0.0" primitive-types = { version = "0.6.2", default-features = false } [dev-dependencies] -sp-runtime-interface-test-wasm = { version = "2.0.0", path = "test-wasm" } -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } -sp-core = { version = "2.0.0", path = "../core" } -sp-io = { version = "2.0.0", path = "../io" } +sp-runtime-interface-test-wasm = { version = "2.0.0-dev", path = "test-wasm" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } +sp-core = { version = "2.0.0-alpha.2", path = "../core" } +sp-io = { version = "2.0.0-alpha.2", path = "../io" } rustversion = "1.0.0" -trybuild = "1.0.17" +trybuild = "1.0.23" [features] default = [ "std" ] diff --git a/primitives/runtime-interface/proc-macro/Cargo.toml b/primitives/runtime-interface/proc-macro/Cargo.toml index b239fbcce32aa0ef3276ea66c9b9bf7e868bccaa..3743b2e09a153d5734250b131b56125040516194 100644 --- a/primitives/runtime-interface/proc-macro/Cargo.toml +++ b/primitives/runtime-interface/proc-macro/Cargo.toml @@ -1,16 +1,20 @@ [package] name = "sp-runtime-interface-proc-macro" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "This crate provides procedural macros for usage within the context of 
the Substrate runtime interface." +documentation = "https://docs.rs/sp-runtime-interface-proc-macro" [lib] proc-macro = true [dependencies] syn = { version = "1.0.5", features = ["full", "visit", "fold", "extra-traits"] } -quote = "1.0.2" +quote = "1.0.3" proc-macro2 = "1.0.3" Inflector = "0.11.4" proc-macro-crate = "0.1.4" diff --git a/primitives/runtime-interface/test-wasm/Cargo.toml b/primitives/runtime-interface/test-wasm/Cargo.toml index c38413aee78e8711129158958291e8c08e14fb2a..b322e529d66735ea9e0418d0a3c2cf932c17eff3 100644 --- a/primitives/runtime-interface/test-wasm/Cargo.toml +++ b/primitives/runtime-interface/test-wasm/Cargo.toml @@ -1,19 +1,22 @@ [package] name = "sp-runtime-interface-test-wasm" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" build = "build.rs" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +publish = false [dependencies] -sp-runtime-interface = { version = "2.0.0", default-features = false, path = "../" } -sp-std = { version = "2.0.0", default-features = false, path = "../../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../../io" } -sp-core = { version = "2.0.0", default-features = false, path = "../../core" } +sp-runtime-interface = { version = "2.0.0-alpha.2", default-features = false, path = "../" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../io" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../core" } [build-dependencies] -wasm-builder-runner = { version = "1.0.3", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } +wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../../utils/wasm-builder-runner" } [features] default = [ "std" ] diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 4e99c0f06c1991bef86444641958bfb01059d2f4..ee7120b1b8cb59e17e2e524c86233afafda2c6f7 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -105,23 +105,6 @@ pub trait TestApi { } } -/// Two random external functions from the old runtime interface. -/// This ensures that we still inherently export these functions from the host and that we are still -/// compatible with old wasm runtimes. -#[cfg(not(feature = "std"))] -extern "C" { - pub fn ext_clear_storage(key_data: *const u8, key_len: u32); - pub fn ext_keccak_256(data: *const u8, len: u32, out: *mut u8); -} - -/// Make sure the old runtime interface needs to be imported. -#[no_mangle] -#[cfg(not(feature = "std"))] -pub fn force_old_runtime_interface_import() { - unsafe { ext_clear_storage(sp_std::ptr::null(), 0); } - unsafe { ext_keccak_256(sp_std::ptr::null(), 0, sp_std::ptr::null_mut()); } -} - /// This function is not used, but we require it for the compiler to include `sp-io`. /// `sp-io` is required for its panic and oom handler. 
#[no_mangle] diff --git a/primitives/runtime-interface/test/Cargo.toml b/primitives/runtime-interface/test/Cargo.toml index d6d724da9db0d76e8b6f22265e3bd0c8184ccfb2..53c05b68b3df285041dad7de43c4236c75566fb1 100644 --- a/primitives/runtime-interface/test/Cargo.toml +++ b/primitives/runtime-interface/test/Cargo.toml @@ -1,15 +1,17 @@ [package] name = "sp-runtime-interface-test" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" publish = false +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] -sp-runtime-interface = { version = "2.0.0", path = "../" } -sc-executor = { version = "0.8", path = "../../../client/executor" } -sp-runtime-interface-test-wasm = { version = "2.0.0", path = "../test-wasm" } -sp-state-machine = { version = "0.8", path = "../../../primitives/state-machine" } -sp-core = { version = "2.0.0", path = "../../core" } -sp-io = { version = "2.0.0", path = "../../io" } +sp-runtime-interface = { version = "2.0.0-alpha.2", path = "../" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor" } +sp-runtime-interface-test-wasm = { version = "2.0.0-dev", path = "../test-wasm" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../../primitives/state-machine" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../runtime" } +sp-io = { version = "2.0.0-alpha.2", path = "../../io" } diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 559a4281e09f211a0c5553b76dac1e3231c06675..014a46e9d74ac6b1a209fd56d67e8bb5141926d9 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -22,27 +22,27 @@ use sp_runtime_interface::*; use sp_runtime_interface_test_wasm::{WASM_BINARY, test_api::HostFunctions}; use sp_wasm_interface::HostFunctions as HostFunctionsT; +use sc_executor::CallInWasm; -type TestExternalities = sp_state_machine::TestExternalities; +type TestExternalities = sp_state_machine::TestExternalities; fn call_wasm_method(method: &str) -> TestExternalities { let mut ext = TestExternalities::default(); let mut ext_ext = ext.ext(); + let mut host_functions = HF::host_functions(); + host_functions.extend(sp_io::SubstrateHostFunctions::host_functions()); - sc_executor::call_in_wasm::< - ( - HF, - sp_io::SubstrateHostFunctions, - sc_executor::deprecated_host_interface::SubstrateExternals - ) - >( + let executor = sc_executor::WasmExecutor::new( + sc_executor::WasmExecutionMethod::Interpreted, + Some(8), + host_functions, + false, + ); + executor.call_in_wasm( + &WASM_BINARY[..], method, &[], - sc_executor::WasmExecutionMethod::Interpreted, &mut ext_ext, - &WASM_BINARY[..], - 8, - false, ).expect(&format!("Executes `{}`", method)); ext @@ -88,7 +88,7 @@ fn test_return_input_public_key() { #[test] #[should_panic( - expected = "Other(\"Instantiation: Export ext_test_api_return_input_version_1 not found\")" + expected = "\"Instantiation: Export ext_test_api_return_input_version_1 not found\"" )] fn host_function_not_found() { call_wasm_method::<()>("test_return_data"); @@ -97,8 +97,9 @@ fn host_function_not_found() { #[test] #[should_panic( expected = - "FunctionExecution(\"ext_test_api_invalid_utf8_data_version_1\", \ - \"Invalid utf8 data provided\")" + "Executes `test_invalid_utf8_data_should_return_an_error`: \ + \"Trap: Trap { kind: Host(FunctionExecution(\\\"ext_test_api_invalid_utf8_data_version_1\\\", \ + \\\"Invalid utf8 
data provided\\\")) }\"" )] fn test_invalid_utf8_data_should_return_an_error() { call_wasm_method::("test_invalid_utf8_data_should_return_an_error"); diff --git a/primitives/runtime/Cargo.toml b/primitives/runtime/Cargo.toml index bf7b2b80a3c5a278dc61745855c5c1096c8e9e3d..84e452a5b49eced112bd5941d09805d5d121bcf3 100644 --- a/primitives/runtime/Cargo.toml +++ b/primitives/runtime/Cargo.toml @@ -1,24 +1,30 @@ [package] name = "sp-runtime" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Runtime Modules shared primitive types." +documentation = "https://docs.rs/sp-runtime" + [dependencies] serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.1.2", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -sp-arithmetic = { version = "2.0.0", default-features = false, path = "../arithmetic" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../io" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../application-crypto" } +sp-arithmetic = { version = "2.0.0-alpha.2", default-features = false, path = "../arithmetic" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../io" } log = { version = "0.4.8", optional = true } paste = "0.1.6" rand = { version = "0.7.2", optional = true } impl-trait-for-tuples = "0.1.3" -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../inherents" } parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } +hash256-std-hasher = { version = "0.15.2", default-features = false } [dev-dependencies] serde_json = "1.0.41" @@ -39,4 +45,5 @@ std = [ "serde", "sp-inherents/std", "parity-util-mem/std", + "hash256-std-hasher/std", ] diff --git a/primitives/runtime/src/generic/checked_extrinsic.rs b/primitives/runtime/src/generic/checked_extrinsic.rs index b2d247ef5f913c1753519a86d57505629322231f..25a8274354ae2aa60be8a97ff368aec2782abb2b 100644 --- a/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/primitives/runtime/src/generic/checked_extrinsic.rs @@ -20,7 +20,6 @@ use crate::traits::{ self, Member, MaybeDisplay, SignedExtension, Dispatchable, }; -#[allow(deprecated)] use crate::traits::ValidateUnsigned; use crate::transaction_validity::TransactionValidity; @@ -46,15 +45,9 @@ where Origin: From>, Info: Clone, { - type AccountId = AccountId; type Call = Call; type DispatchInfo = Info; - fn sender(&self) -> Option<&Self::AccountId> { - self.signed.as_ref().map(|x| &x.0) - } - - #[allow(deprecated)] // Allow ValidateUnsigned fn validate>( &self, info: Self::DispatchInfo, @@ -69,7 +62,6 @@ where } } - #[allow(deprecated)] // Allow ValidateUnsigned fn apply>( self, info: 
Self::DispatchInfo, diff --git a/primitives/runtime/src/offchain/http.rs b/primitives/runtime/src/offchain/http.rs index 88f4323ad7e9ed60e382535756edfa17d96b78bc..bbc929526b75998c181346a87eeaa3e532dc441c 100644 --- a/primitives/runtime/src/offchain/http.rs +++ b/primitives/runtime/src/offchain/http.rs @@ -241,7 +241,7 @@ impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { sp_io::offchain::http_request_write_body(id, chunk.as_ref(), self.deadline)?; } - // finalise the request + // finalize the request sp_io::offchain::http_request_write_body(id, &[], self.deadline)?; Ok(PendingRequest { diff --git a/primitives/runtime/src/offchain/mod.rs b/primitives/runtime/src/offchain/mod.rs index dfc15360c6c91aa995e48f946d84eda741cb454c..9f0f949eaeb8df483df75b680fc1b872ae91ea4f 100644 --- a/primitives/runtime/src/offchain/mod.rs +++ b/primitives/runtime/src/offchain/mod.rs @@ -18,3 +18,5 @@ pub mod http; pub mod storage; + +pub use sp_core::offchain::*; diff --git a/primitives/runtime/src/random_number_generator.rs b/primitives/runtime/src/random_number_generator.rs index e741aac09766fe50d14bef063f08a97ea71b50c4..c3cd3dfb90f76a2c21ef32f71b44f49c8db4fcd7 100644 --- a/primitives/runtime/src/random_number_generator.rs +++ b/primitives/runtime/src/random_number_generator.rs @@ -66,7 +66,7 @@ impl RandomNumberGenerator { loop { if self.offset() + needed > self.current.as_ref().len() { // rehash - self.current = Hashing::hash(self.current.as_ref()); + self.current = ::hash(self.current.as_ref()); self.offset = 0; } let data = &self.current.as_ref()[self.offset()..self.offset() + needed]; diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index be0e36b2d1a6b76ce7ec61382856c70b6fddffba..e840cdd100c2e05a366668c7205096b772ff8d0d 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -23,7 +23,6 @@ use crate::traits::{ self, Checkable, Applyable, BlakeTwo256, OpaqueKeys, SignedExtension, Dispatchable, }; -#[allow(deprecated)] use crate::traits::ValidateUnsigned; use crate::{generic::{self, CheckSignature}, KeyTypeId, ApplyExtrinsicResult}; pub use sp_core::{H256, sr25519}; @@ -411,14 +410,10 @@ impl Applyable for TestXt where Origin: From>, Info: Clone, { - type AccountId = u64; type Call = Call; type DispatchInfo = Info; - fn sender(&self) -> Option<&Self::AccountId> { self.signature.as_ref().map(|x| &x.0) } - /// Checks to see if this is a valid *transaction*. It returns information on it if so. - #[allow(deprecated)] // Allow ValidateUnsigned fn validate>( &self, _info: Self::DispatchInfo, @@ -429,7 +424,6 @@ impl Applyable for TestXt where /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. 
- #[allow(deprecated)] // Allow ValidateUnsigned fn apply>( self, info: Self::DispatchInfo, diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index f6655f68b450250f22cb8e2ae3e2a02e63f6158e..4e30e545e4ea5e166f9d608b7d8f5ec105426c48 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -25,7 +25,7 @@ use std::fmt::Display; use std::str::FromStr; #[cfg(feature = "std")] use serde::{Serialize, Deserialize, de::DeserializeOwned}; -use sp_core::{self, Hasher, Blake2Hasher, TypeId, RuntimeDebug}; +use sp_core::{self, Hasher, TypeId, RuntimeDebug}; use crate::codec::{Codec, Encode, Decode}; use crate::transaction_validity::{ ValidTransaction, TransactionValidity, TransactionValidityError, UnknownTransaction, @@ -345,6 +345,14 @@ pub trait OnInitialize { fn on_initialize(_n: BlockNumber) {} } +/// The runtime upgrade trait. Implementing this lets you express what should happen +/// when the runtime upgrades, and changes may need to occur to your module. +#[impl_for_tuples(30)] +pub trait OnRuntimeUpgrade { + /// Perform a module upgrade. + fn on_runtime_upgrade() {} +} + /// Off-chain computation trait. /// /// Implementing this trait on a module allows you to perform long-running tasks @@ -369,20 +377,19 @@ pub trait OffchainWorker { /// Abstraction around hashing // Stupid bug in the Rust compiler believes derived // traits must be fulfilled by all type parameters. -pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + PartialEq { +pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + PartialEq + Hasher::Output> { /// The hash type produced. type Output: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + Encode + Decode; - /// The associated hash_db Hasher type. - type Hasher: Hasher; - /// Produce the hash of some byte-slice. - fn hash(s: &[u8]) -> Self::Output; + fn hash(s: &[u8]) -> Self::Output { + ::hash(s) + } /// Produce the hash of some codec-encodable value. fn hash_of(s: &S) -> Self::Output { - Encode::using_encoded(s, Self::hash) + Encode::using_encoded(s, ::hash) } /// The ordered Patricia tree root of the given `input`. @@ -397,12 +404,18 @@ pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + Parti #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub struct BlakeTwo256; -impl Hash for BlakeTwo256 { - type Output = sp_core::H256; - type Hasher = Blake2Hasher; - fn hash(s: &[u8]) -> Self::Output { +impl Hasher for BlakeTwo256 { + type Out = sp_core::H256; + type StdHasher = hash256_std_hasher::Hash256StdHasher; + const LENGTH: usize = 32; + + fn hash(s: &[u8]) -> Self::Out { sp_io::hashing::blake2_256(s).into() } +} + +impl Hash for BlakeTwo256 { + type Output = sp_core::H256; fn trie_root(input: Vec<(Vec, Vec)>) -> Self::Output { sp_io::trie::blake2_256_root(input) @@ -616,8 +629,6 @@ pub trait ExtrinsicMetadata { type SignedExtensions: SignedExtension; } -/// Extract the hasher type for a block. -pub type HasherFor = as Hash>::Hasher; /// Extract the hashing type for a block. pub type HashFor = <::Header as Header>::Hashing; /// Extract the number type for a block. @@ -876,20 +887,13 @@ impl SignedExtension for () { /// Also provides information on to whom this information is attributable and an index that allows /// each piece of attributable information to be disambiguated. 
pub trait Applyable: Sized + Send + Sync { - /// ID of the account that is responsible for this piece of information (sender). - type AccountId: Member + MaybeDisplay; - /// Type by which we can dispatch. Restricts the `UnsignedValidator` type. type Call; /// An opaque set of information attached to the transaction. type DispatchInfo: Clone; - /// Returns a reference to the sender if any. - fn sender(&self) -> Option<&Self::AccountId>; - /// Checks to see if this is a valid *transaction*. It returns information on it if so. - #[allow(deprecated)] // Allow ValidateUnsigned fn validate>( &self, info: Self::DispatchInfo, @@ -898,7 +902,6 @@ pub trait Applyable: Sized + Send + Sync { /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, /// index and sender. - #[allow(deprecated)] // Allow ValidateUnsigned fn apply>( self, info: Self::DispatchInfo, @@ -924,7 +927,6 @@ pub trait GetNodeBlockType { /// the transaction for the transaction pool. /// During block execution phase one need to perform the same checks anyway, /// since this function is not being called. -#[deprecated(note = "Use SignedExtensions instead.")] pub trait ValidateUnsigned { /// The call to validate type Call; diff --git a/primitives/sandbox/Cargo.toml b/primitives/sandbox/Cargo.toml index c942e019fe6f68462e3ac6f73a1ee6d125e56334..060801a29e41e28df705a58353aca85baefc19ac 100755 --- a/primitives/sandbox/Cargo.toml +++ b/primitives/sandbox/Cargo.toml @@ -1,17 +1,20 @@ [package] name = "sp-sandbox" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "This crate provides means to instantiate and execute wasm modules." [dependencies] wasmi = { version = "0.6.2", optional = true } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-io = { version = "2.0.0", default-features = false, path = "../io" } -sp-wasm-interface = { version = "2.0.0", default-features = false, path = "../wasm-interface" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../io" } +sp-wasm-interface = { version = "2.0.0-alpha.2", default-features = false, path = "../wasm-interface" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } [dev-dependencies] wabt = "0.9.2" diff --git a/primitives/serializer/Cargo.toml b/primitives/serializer/Cargo.toml index b9e78d968c66c2d538e45cb0a6bb93255398a372..39f78c078b0caa8ee28a079155910005fcbf3916 100644 --- a/primitives/serializer/Cargo.toml +++ b/primitives/serializer/Cargo.toml @@ -1,9 +1,13 @@ [package] name = "sp-serializer" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate customizable serde serializer." 
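The `sp-runtime` traits hunk above also introduces an `OnRuntimeUpgrade` hook with an empty default body. A minimal sketch of a module opting into it (the `MyModule` type is a hypothetical placeholder, not part of this patch):

use sp_runtime::traits::OnRuntimeUpgrade;

// Hypothetical module type, used only to illustrate the new hook.
pub struct MyModule;

impl OnRuntimeUpgrade for MyModule {
	fn on_runtime_upgrade() {
		// One-off storage migration logic would run here when the runtime upgrades.
	}
}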
+documentation = "https://docs.rs/sp-serializer"
[dependencies]
serde = "1.0.101"
diff --git a/primitives/session/Cargo.toml b/primitives/session/Cargo.toml
index 143ff87942fd1b47cbe4b365b7ce7f69277d00a7..d01f7ee440cf12e01a9cbca40c03d9082c34d073 100644
--- a/primitives/session/Cargo.toml
+++ b/primitives/session/Cargo.toml
@@ -1,15 +1,18 @@
[package]
name = "sp-session"
-version = "2.0.0"
+version = "2.0.0-alpha.3"
authors = ["Parity Technologies "]
edition = "2018"
license = "GPL-3.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+description = "Primitives for sessions"
[dependencies]
-sp-api = { version = "2.0.0", default-features = false, path = "../api" }
-sp-std = { version = "2.0.0", default-features = false, path = "../std" }
-sp-core = { version = "2.0.0", default-features = false, path = "../core" }
-sp-runtime = { version = "2.0.0", optional = true, path = "../runtime" }
+sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" }
+sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" }
+sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" }
+sp-runtime = { version = "2.0.0-alpha.2", optional = true, path = "../runtime" }
[features]
default = [ "std" ]
diff --git a/primitives/staking/Cargo.toml b/primitives/staking/Cargo.toml
index 97afa0d0a76109549b62cd9f8f57ae85d6e8377f..2f85b8251b0ae4035aa98cceeaf56a6737c7cf21 100644
--- a/primitives/staking/Cargo.toml
+++ b/primitives/staking/Cargo.toml
@@ -1,14 +1,17 @@
[package]
name = "sp-staking"
-version = "2.0.0"
+version = "2.0.0-alpha.3"
authors = ["Parity Technologies "]
edition = "2018"
license = "GPL-3.0"
+homepage = "https://substrate.dev"
+repository = "https://github.com/paritytech/substrate/"
+description = "A crate which contains primitives that are useful for implementation that uses staking approaches in general. Definitions related to sessions, slashing, etc go here."
[dependencies]
-codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] }
-sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" }
-sp-std = { version = "2.0.0", default-features = false, path = "../std" }
+codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] }
+sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" }
+sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" }
[features]
default = ["std"]
diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs
index cac1ed065ceb7916242f45cf564eb29a08697b10..06e73f018b7656a691801b93b2178631cacd5ddd 100644
--- a/primitives/staking/src/offence.rs
+++ b/primitives/staking/src/offence.rs
@@ -91,14 +91,37 @@ pub trait Offence {
	) -> Perbill;
}
+/// Errors that may happen on offence reports.
+#[derive(PartialEq, sp_runtime::RuntimeDebug)]
+pub enum OffenceError {
+	/// The report has already been submitted.
+	DuplicateReport,
+
+	/// Other error has happened.
+	Other(u8),
+}
+
+impl sp_runtime::traits::Printable for OffenceError {
+	fn print(&self) {
+		"OffenceError".print();
+		match self {
+			Self::DuplicateReport => "DuplicateReport".print(),
+			Self::Other(e) => {
+				"Other".print();
+				e.print();
+			}
+		}
+	}
+}
+
/// A trait for decoupling offence reporters from the actual handling of offence reports.
pub trait ReportOffence> {
	/// Report an `offence` and reward given `reporters`.
- fn report_offence(reporters: Vec, offence: O); + fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError>; } impl> ReportOffence for () { - fn report_offence(_reporters: Vec, _offence: O) {} + fn report_offence(_reporters: Vec, _offence: O) -> Result<(), OffenceError> { Ok(()) } } /// A trait to take action on an offence. diff --git a/primitives/state-machine/Cargo.toml b/primitives/state-machine/Cargo.toml index 2408cce099b24f47fd168e6bb5e899b6964ccc4d..16f921c3ac137171d7a90f7820b8c41dcc382457 100644 --- a/primitives/state-machine/Cargo.toml +++ b/primitives/state-machine/Cargo.toml @@ -1,10 +1,13 @@ [package] name = "sp-state-machine" -version = "0.8.0" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Substrate State Machine" edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sp-state-machine" [dependencies] log = "0.4.8" @@ -12,16 +15,17 @@ parking_lot = "0.10.0" hash-db = "0.15.2" trie-db = "0.20.0" trie-root = "0.16.0" -sp-trie = { version = "2.0.0", path = "../trie" } -sp-core = { version = "2.0.0", path = "../core" } -sp-panic-handler = { version = "2.0.0", path = "../panic-handler" } -codec = { package = "parity-scale-codec", version = "1.0.0" } +sp-trie = { version = "2.0.0-alpha.2", path = "../trie" } +sp-core = { version = "2.0.0-alpha.2", path = "../core" } +sp-panic-handler = { version = "2.0.0-alpha.2", path = "../panic-handler" } +codec = { package = "parity-scale-codec", version = "1.2.0" } num-traits = "0.2.8" rand = "0.7.2" -sp-externalities = { version = "0.8.0", path = "../externalities" } +sp-externalities = { version = "0.8.0-alpha.2", path = "../externalities" } [dev-dependencies] hex-literal = "0.2.1" +sp-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } [features] default = [] diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index d905657737a8a085492e4d78f5f135404dc8b651..7252ae10e9073429f7b8b52365d5b7790ccb3405 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -298,6 +298,10 @@ impl Externalities for BasicExternalities { fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { Ok(None) } + + fn wipe(&mut self) {} + + fn commit(&mut self) {} } impl sp_externalities::ExtensionStore for BasicExternalities { @@ -346,7 +350,7 @@ mod tests { top: Default::default(), children: map![ child_storage.clone() => StorageChild { - data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], + data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], child_info: CHILD_INFO_1.to_owned(), } ] diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 9f2d44967d71612f924c1716231db05895e7d6dc..685786218c75f42100742726e22ada3d651792be 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -376,13 +376,13 @@ impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> #[cfg(test)] mod tests { use std::iter::FromIterator; - use sp_core::Blake2Hasher; use crate::changes_trie::Configuration; use crate::changes_trie::input::InputPair; use crate::changes_trie::storage::InMemoryStorage; + use sp_runtime::traits::BlakeTwo256; use super::*; - fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { + fn prepare_for_drilldown() -> 
(Configuration, InMemoryStorage) { let config = Configuration { digest_interval: 4, digest_levels: 2 }; let backend = InMemoryStorage::with_inputs(vec![ // digest: 1..4 => [(3, 0)] @@ -447,7 +447,7 @@ mod tests { #[test] fn drilldown_iterator_works() { let (config, storage) = prepare_for_drilldown(); - let drilldown_result = key_changes::( + let drilldown_result = key_changes::( configuration_range(&config, 0), &storage, 1, @@ -458,7 +458,7 @@ mod tests { ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); - let drilldown_result = key_changes::( + let drilldown_result = key_changes::( configuration_range(&config, 0), &storage, 1, @@ -469,7 +469,7 @@ mod tests { ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![])); - let drilldown_result = key_changes::( + let drilldown_result = key_changes::( configuration_range(&config, 0), &storage, 1, @@ -480,7 +480,7 @@ mod tests { ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(3, 0)])); - let drilldown_result = key_changes::( + let drilldown_result = key_changes::( configuration_range(&config, 0), &storage, 1, @@ -491,7 +491,7 @@ mod tests { ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)])); - let drilldown_result = key_changes::( + let drilldown_result = key_changes::( configuration_range(&config, 0), &storage, 7, @@ -502,7 +502,7 @@ mod tests { ).and_then(Result::from_iter); assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); - let drilldown_result = key_changes::( + let drilldown_result = key_changes::( configuration_range(&config, 0), &storage, 5, @@ -519,7 +519,7 @@ mod tests { let (config, storage) = prepare_for_drilldown(); storage.clear_storage(); - assert!(key_changes::( + assert!(key_changes::( configuration_range(&config, 0), &storage, 1, @@ -529,7 +529,7 @@ mod tests { &[42], ).and_then(|i| i.collect::, _>>()).is_err()); - assert!(key_changes::( + assert!(key_changes::( configuration_range(&config, 0), &storage, 1, @@ -543,7 +543,7 @@ mod tests { #[test] fn drilldown_iterator_fails_when_range_is_invalid() { let (config, storage) = prepare_for_drilldown(); - assert!(key_changes::( + assert!(key_changes::( configuration_range(&config, 0), &storage, 1, @@ -552,7 +552,7 @@ mod tests { None, &[42], ).is_err()); - assert!(key_changes::( + assert!(key_changes::( configuration_range(&config, 0), &storage, 20, @@ -570,12 +570,12 @@ mod tests { // create drilldown iterator that records all trie nodes during drilldown let (remote_config, remote_storage) = prepare_for_drilldown(); - let remote_proof = key_changes_proof::( + let remote_proof = key_changes_proof::( configuration_range(&remote_config, 0), &remote_storage, 1, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, None, &[42]).unwrap(); let (remote_config, remote_storage) = prepare_for_drilldown(); - let remote_proof_child = key_changes_proof::( + let remote_proof_child = key_changes_proof::( configuration_range(&remote_config, 0), &remote_storage, 1, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]).unwrap(); @@ -584,13 +584,13 @@ mod tests { // create drilldown iterator that works the same, but only depends on trie let (local_config, local_storage) = prepare_for_drilldown(); local_storage.clear_storage(); - let local_result = key_changes_proof_check::( + let local_result = key_changes_proof_check::( configuration_range(&local_config, 0), &local_storage, remote_proof, 1, &AnchorBlockId { hash: Default::default(), 
number: 16 }, 16, None, &[42]); let (local_config, local_storage) = prepare_for_drilldown(); local_storage.clear_storage(); - let local_result_child = key_changes_proof_check::( + let local_result_child = key_changes_proof_check::( configuration_range(&local_config, 0), &local_storage, remote_proof_child, 1, &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]); @@ -621,7 +621,7 @@ mod tests { input[91 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 91, key: vec![42] }, vec![80])); let storage = InMemoryStorage::with_inputs(input, vec![]); - let drilldown_result = key_changes::( + let drilldown_result = key_changes::( config_range, &storage, 1, diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index f6be3223ae9f89137cda391d2202997605f10844..87923dc2f593c183ac4fd791e3cbe742be51e0c9 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -114,14 +114,15 @@ fn prune_trie( mod tests { use std::collections::HashSet; use sp_trie::MemoryDB; - use sp_core::{H256, Blake2Hasher}; + use sp_core::H256; use crate::backend::insert_into_memory_db; use crate::changes_trie::storage::InMemoryStorage; use codec::Encode; + use sp_runtime::traits::BlakeTwo256; use super::*; fn prune_by_collect( - storage: &dyn Storage, + storage: &dyn Storage, first: u64, last: u64, current_block: u64, @@ -135,27 +136,26 @@ mod tests { #[test] fn prune_works() { - fn prepare_storage() -> InMemoryStorage { - + fn prepare_storage() -> InMemoryStorage { let child_key = ChildIndex { block: 67u64, storage_key: b"1".to_vec() }.encode(); - let mut mdb1 = MemoryDB::::default(); - let root1 = insert_into_memory_db::( + let mut mdb1 = MemoryDB::::default(); + let root1 = insert_into_memory_db::( &mut mdb1, vec![(vec![10], vec![20])]).unwrap(); - let mut mdb2 = MemoryDB::::default(); - let root2 = insert_into_memory_db::( + let mut mdb2 = MemoryDB::::default(); + let root2 = insert_into_memory_db::( &mut mdb2, vec![(vec![11], vec![21]), (vec![12], vec![22])], ).unwrap(); - let mut mdb3 = MemoryDB::::default(); - let ch_root3 = insert_into_memory_db::( + let mut mdb3 = MemoryDB::::default(); + let ch_root3 = insert_into_memory_db::( &mut mdb3, vec![(vec![110], vec![120])]).unwrap(); - let root3 = insert_into_memory_db::(&mut mdb3, vec![ + let root3 = insert_into_memory_db::(&mut mdb3, vec![ (vec![13], vec![23]), (vec![14], vec![24]), (child_key, ch_root3.as_ref().encode()), ]).unwrap(); - let mut mdb4 = MemoryDB::::default(); - let root4 = insert_into_memory_db::( + let mut mdb4 = MemoryDB::::default(); + let root4 = insert_into_memory_db::( &mut mdb4, vec![(vec![15], vec![25])], ).unwrap(); diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index 0a29468bbc4efd024e811d8782adc34a14b7d580..7e474d45b650e480f35ee82bcae34828878a2ab3 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ b/primitives/state-machine/src/in_memory_backend.rs @@ -362,11 +362,12 @@ impl Backend for InMemory where H::Out: Codec { #[cfg(test)] mod tests { use super::*; + use sp_runtime::traits::BlakeTwo256; /// Assert in memory backend with only child trie keys works as trie backend. 
#[test] fn in_memory_with_child_trie_only() { - let storage = InMemory::::default(); + let storage = InMemory::::default(); let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec()); let mut storage = storage.update( vec![( diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index bb2bb2c52c86f153c47e3a32b23aced7d3c4d5d2..ff41237c83131d29cbb1c01d970c6fd8c8f9fc2c 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -692,7 +692,8 @@ mod tests { use super::*; use super::ext::Ext; use super::changes_trie::Configuration as ChangesTrieConfig; - use sp_core::{Blake2Hasher, map, traits::Externalities, storage::ChildStorageKey}; + use sp_core::{map, traits::Externalities, storage::ChildStorageKey}; + use sp_runtime::traits::BlakeTwo256; #[derive(Clone)] struct DummyCodeExecutor { @@ -708,12 +709,11 @@ mod tests { type Error = u8; fn call< - E: Externalities, R: Encode + Decode + PartialEq, NC: FnOnce() -> result::Result, >( &self, - ext: &mut E, + ext: &mut dyn Externalities, _method: &str, _data: &[u8], use_native: bool, @@ -867,7 +867,7 @@ mod tests { ).unwrap(); // check proof locally - let local_result = execution_proof_check::( + let local_result = execution_proof_check::( remote_root, remote_proof, &mut Default::default(), @@ -889,7 +889,7 @@ mod tests { b"abc".to_vec() => b"2".to_vec(), b"bbb".to_vec() => b"3".to_vec() ]; - let mut state = InMemoryBackend::::from(initial); + let mut state = InMemoryBackend::::from(initial); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges { committed: map![ @@ -932,7 +932,7 @@ mod tests { #[test] fn set_child_storage_works() { - let mut state = InMemoryBackend::::default(); + let mut state = InMemoryBackend::::default(); let backend = state.as_trie_backend().unwrap(); let mut overlay = OverlayedChanges::default(); let mut cache = StorageTransactionCache::default(); @@ -979,12 +979,12 @@ mod tests { let remote_root = remote_backend.storage_root(::std::iter::empty()).0; let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); // check proof locally - let local_result1 = read_proof_check::( + let local_result1 = read_proof_check::( remote_root, remote_proof.clone(), &[b"value2"], ).unwrap(); - let local_result2 = read_proof_check::( + let local_result2 = read_proof_check::( remote_root, remote_proof.clone(), &[&[0xff]], @@ -1004,13 +1004,13 @@ mod tests { CHILD_INFO_1, &[b"value3"], ).unwrap(); - let local_result1 = read_child_proof_check::( + let local_result1 = read_child_proof_check::( remote_root, remote_proof.clone(), b":child_storage:default:sub1", &[b"value3"], ).unwrap(); - let local_result2 = read_child_proof_check::( + let local_result2 = read_child_proof_check::( remote_root, remote_proof.clone(), b":child_storage:default:sub1", diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 672ec6ea794e4e42771d3089b69bb97016dfeb94..7b6e8e0e698840b369aca9b882039d03509d92be 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -398,16 +398,17 @@ mod tests { use crate::InMemoryBackend; use crate::trie_backend::tests::test_trie; use super::*; - use sp_core::{Blake2Hasher, storage::ChildStorageKey}; + use sp_core::storage::ChildStorageKey; use crate::proving_backend::create_proof_check_backend; use sp_trie::PrefixedMemoryDB; + use sp_runtime::traits::BlakeTwo256; const CHILD_INFO_1: ChildInfo<'static> = 
ChildInfo::new_default(b"unique_id_1"); const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); fn test_proving<'a>( - trie_backend: &'a TrieBackend,Blake2Hasher>, - ) -> ProvingBackend<'a, PrefixedMemoryDB, Blake2Hasher> { + trie_backend: &'a TrieBackend,BlakeTwo256>, + ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { ProvingBackend::new(trie_backend) } @@ -428,7 +429,7 @@ mod tests { #[test] fn proof_is_invalid_when_does_not_contains_root() { use sp_core::H256; - let result = create_proof_check_backend::( + let result = create_proof_check_backend::( H256::from_low_u64_be(1), StorageProof::empty() ); @@ -451,7 +452,7 @@ mod tests { #[test] fn proof_recorded_and_checked() { let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); - let in_memory = InMemoryBackend::::default(); + let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(vec![(None, contents)]); let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); @@ -466,7 +467,7 @@ mod tests { let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); } @@ -483,7 +484,7 @@ mod tests { (Some((own2.clone(), CHILD_INFO_2.to_owned())), (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), ]; - let in_memory = InMemoryBackend::::default(); + let in_memory = InMemoryBackend::::default(); let mut in_memory = in_memory.update(contents); let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( ::std::iter::empty(), @@ -515,7 +516,7 @@ mod tests { let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::( + let proof_check = create_proof_check_backend::( in_memory_root.into(), proof ).unwrap(); @@ -529,7 +530,7 @@ mod tests { assert_eq!(proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::( + let proof_check = create_proof_check_backend::( in_memory_root.into(), proof ).unwrap(); diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 39a34509b720bbb29ec24f755748a2445e8701ee..aec42c76787074597a6e766a34042a1af574a714 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -34,13 +34,12 @@ use sp_core::{ well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES, is_child_storage_key}, Storage, }, - Blake2Hasher, }; use codec::Encode; use sp_externalities::{Extensions, Extension}; /// Simple HashMap-based Externalities impl. 
-pub struct TestExternalities +pub struct TestExternalities where H::Out: codec::Codec, { @@ -198,11 +197,12 @@ impl sp_externalities::ExtensionStore for TestExternalities where mod tests { use super::*; use sp_core::traits::Externalities; + use sp_runtime::traits::BlakeTwo256; use hex_literal::hex; #[test] fn commit_should_work() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); let mut ext = ext.ext(); ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); @@ -213,7 +213,7 @@ mod tests { #[test] fn set_and_retrieve_code() { - let mut ext = TestExternalities::::default(); + let mut ext = TestExternalities::::default(); let mut ext = ext.ext(); let code = vec![1, 2, 3]; @@ -225,6 +225,6 @@ mod tests { #[test] fn check_send() { fn assert_send() {} - assert_send::>(); + assert_send::>(); } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index dbaae323c09f29bae7ba9bba75e776963dfbbf3e..e64dd590e51f711f2e59f0224f66e980121ee35f 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -245,9 +245,10 @@ impl, H: Hasher> Backend for TrieBackend where #[cfg(test)] pub mod tests { use std::collections::HashSet; - use sp_core::{Blake2Hasher, H256}; + use sp_core::H256; use codec::Encode; use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; + use sp_runtime::traits::BlakeTwo256; use super::*; const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; @@ -255,9 +256,9 @@ pub mod tests { const CHILD_UUID_1: &[u8] = b"unique_id_1"; const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - fn test_db() -> (PrefixedMemoryDB, H256) { + fn test_db() -> (PrefixedMemoryDB, H256) { let mut root = H256::default(); - let mut mdb = PrefixedMemoryDB::::default(); + let mut mdb = PrefixedMemoryDB::::default(); { let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1); let mut trie = TrieDBMut::new(&mut mdb, &mut root); @@ -281,7 +282,7 @@ pub mod tests { (mdb, root) } - pub(crate) fn test_trie() -> TrieBackend, Blake2Hasher> { + pub(crate) fn test_trie() -> TrieBackend, BlakeTwo256> { let (mdb, root) = test_db(); TrieBackend::new(mdb, root) } @@ -312,7 +313,7 @@ pub mod tests { #[test] fn pairs_are_empty_on_empty_storage() { - assert!(TrieBackend::, Blake2Hasher>::new( + assert!(TrieBackend::, BlakeTwo256>::new( PrefixedMemoryDB::default(), Default::default(), ).pairs().is_empty()); diff --git a/primitives/std/Cargo.toml b/primitives/std/Cargo.toml index dd4b7e4511f3369a6faf71552fc2281c51c77ff5..38a0c713c0563d88758ce4ace6e9eb790fc0369e 100644 --- a/primitives/std/Cargo.toml +++ b/primitives/std/Cargo.toml @@ -1,9 +1,14 @@ [package] name = "sp-std" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" + +description = "Lowest-abstraction level for the Substrate runtime: just exports useful primitives from std or client/alloc to be used with any code that depends on the runtime." 
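The `sp-state-machine` testing hunk above re-parameterizes `TestExternalities` over a hash type, so callers now name `BlakeTwo256` from `sp-runtime` instead of the raw `Blake2Hasher`. A minimal sketch mirroring the updated tests (assuming the block-number parameter keeps its default; the key/value bytes are arbitrary):

use sp_core::traits::Externalities;
use sp_runtime::traits::BlakeTwo256;
use sp_state_machine::TestExternalities;

#[test]
fn externalities_take_a_hash_type() {
	// The hash type, not a raw hasher, now parameterizes the externalities.
	let mut ext = TestExternalities::<BlakeTwo256>::default();
	let mut ext = ext.ext();
	ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec());
}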
+documentation = "https://docs.rs/sp-std" [features] default = ["std"] diff --git a/primitives/storage/Cargo.toml b/primitives/storage/Cargo.toml index c9fda1816b55e52d73885ceda1c1aaec0b422f9d..7e434cdd898212aed62582e9e8dbcef92a16bd45 100644 --- a/primitives/storage/Cargo.toml +++ b/primitives/storage/Cargo.toml @@ -1,16 +1,19 @@ [package] name = "sp-storage" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" description = "Storage related primitives" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +documentation = "https://docs.rs/sp-storage/" [dependencies] -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } serde = { version = "1.0.101", optional = true, features = ["derive"] } impl-serde = { version = "0.2.3", optional = true } -sp-debug-derive = { version = "2.0.0", path = "../debug-derive" } +sp-debug-derive = { version = "2.0.0-alpha.2", path = "../debug-derive" } [features] default = [ "std" ] diff --git a/primitives/test-primitives/Cargo.toml b/primitives/test-primitives/Cargo.toml index 2edd5f05751d9f142e0613d4b9fca7b110f0ba17..1879ca26b7e1d5f6903c54d9182639cd1e6aca4e 100644 --- a/primitives/test-primitives/Cargo.toml +++ b/primitives/test-primitives/Cargo.toml @@ -1,16 +1,19 @@ [package] name = "sp-test-primitives" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +publish = false [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../application-crypto" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../application-crypto" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [features] diff --git a/primitives/timestamp/Cargo.toml b/primitives/timestamp/Cargo.toml index 815aaf5305fb536f3333c89214795c4dccded3f7..1fc40a113f90378078398c633f508a9871c843dc 100644 --- a/primitives/timestamp/Cargo.toml +++ b/primitives/timestamp/Cargo.toml @@ -1,16 +1,19 @@ [package] name = "sp-timestamp" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate core types and inherents for timestamps." 
[dependencies] -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -sp-inherents = { version = "2.0.0", default-features = false, path = "../inherents" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../inherents" } impl-trait-for-tuples = "0.1.3" wasm-timer = "0.2" diff --git a/primitives/transaction-pool/Cargo.toml b/primitives/transaction-pool/Cargo.toml index 18b254f2c093b9a63fda2284629a8f99ecf76f5a..69ec59cf5ef92015bde57140311f6d71f716c6fe 100644 --- a/primitives/transaction-pool/Cargo.toml +++ b/primitives/transaction-pool/Cargo.toml @@ -1,18 +1,23 @@ [package] name = "sp-transaction-pool" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Transaction pool primitives types & Runtime API." +documentation = "https://docs.rs/sp-transaction-pool" + [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", optional = true } +codec = { package = "parity-scale-codec", version = "1.2.0", optional = true } derive_more = { version = "0.99.2", optional = true } futures = { version = "0.3.1", optional = true } log = { version = "0.4.8", optional = true } serde = { version = "1.0.101", features = ["derive"], optional = true} -sp-api = { version = "2.0.0", default-features = false, path = "../api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../api" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } [features] default = [ "std" ] diff --git a/primitives/trie/Cargo.toml b/primitives/trie/Cargo.toml index 361ddb186cca7077bc423bfd2c9c0759da885ecb..ff3c04b54193f0778bb5d503e8b3544da5054ebc 100644 --- a/primitives/trie/Cargo.toml +++ b/primitives/trie/Cargo.toml @@ -1,30 +1,33 @@ [package] name = "sp-trie" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] description = "Patricia trie stuff using a parity-scale-codec node format" -repository = "https://github.com/paritytech/substrate" +repository = "https://github.com/paritytech/substrate/" license = "GPL-3.0" edition = "2018" +homepage = "https://substrate.dev" +documentation = "https://docs.rs/sp-trie" [[bench]] name = "bench" harness = false [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } hash-db = { version = "0.15.2", default-features = false } trie-db = { version = "0.20.0", default-features = 
false } trie-root = { version = "0.16.0", default-features = false } memory-db = { version = "0.19.0", default-features = false } -sp-core = { version = "2.0.0", default-features = false, path = "../core" } +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../core" } [dev-dependencies] trie-bench = "0.20.0" trie-standardmap = "0.15.2" criterion = "0.2.11" hex-literal = "0.2.1" +sp-runtime = { version = "2.0.0-alpha.2", path = "../runtime" } [features] default = ["std"] diff --git a/primitives/trie/benches/bench.rs b/primitives/trie/benches/bench.rs index 72a53c18d2526909b839718cdb00097ae5e04383..d385b4bacd4c0e06d02e30f49aea7b758749cf64 100644 --- a/primitives/trie/benches/bench.rs +++ b/primitives/trie/benches/bench.rs @@ -20,11 +20,11 @@ criterion_main!(benches); fn benchmark(c: &mut Criterion) { trie_bench::standard_benchmark::< - sp_trie::Layout, + sp_trie::Layout, sp_trie::TrieStream, >(c, "substrate-blake2"); trie_bench::standard_benchmark::< - sp_trie::Layout, + sp_trie::Layout, sp_trie::TrieStream, >(c, "substrate-keccak"); } diff --git a/primitives/version/Cargo.toml b/primitives/version/Cargo.toml index 1219d9840c30e772907016fccd26e647a9286140..edaf940465bd679cf94e9c319bc681d3ee680516 100644 --- a/primitives/version/Cargo.toml +++ b/primitives/version/Cargo.toml @@ -1,16 +1,21 @@ [package] name = "sp-version" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Version module for the Substrate runtime; Provides a function that returns the runtime version." +documentation = "https://docs.rs/sp-version" + [dependencies] impl-serde = { version = "0.2.3", optional = true } serde = { version = "1.0.101", optional = true, features = ["derive"] } -codec = { package = "parity-scale-codec", version = "1.1.2", default-features = false, features = ["derive"] } -sp-std = { version = "2.0.0", default-features = false, path = "../std" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../runtime" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../std" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../runtime" } [features] default = ["std"] diff --git a/primitives/wasm-interface/Cargo.toml b/primitives/wasm-interface/Cargo.toml index bb9ea85872a87761b027d2b50f9ee159b45ef680..bbe53df258271b74c4fd36a035368c915cc28768 100644 --- a/primitives/wasm-interface/Cargo.toml +++ b/primitives/wasm-interface/Cargo.toml @@ -1,15 +1,19 @@ [package] name = "sp-wasm-interface" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Types and traits for interfacing between the host and the wasm runtime." 
+documentation = "https://docs.rs/sp-wasm-interface" [dependencies] wasmi = { version = "0.6.2", optional = true } impl-trait-for-tuples = "0.1.2" -sp-std = { version = "2.0.0", path = "../std", default-features = false } -codec = { package = "parity-scale-codec", version = "1.1.2", default-features = false, features = ["derive"] } +sp-std = { version = "2.0.0-alpha.2", path = "../std", default-features = false } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } [features] default = [ "std" ] diff --git a/primitives/wasm-interface/src/lib.rs b/primitives/wasm-interface/src/lib.rs index 7bb4469c771ffb0066677fa2c2bda3a719de244b..eda2ebb1b527ae876d3275608e20aec04ba5223c 100644 --- a/primitives/wasm-interface/src/lib.rs +++ b/primitives/wasm-interface/src/lib.rs @@ -170,6 +170,12 @@ impl Pointer { } } +impl From for Pointer { + fn from(ptr: u32) -> Self { + Pointer::new(ptr) + } +} + impl From> for u32 { fn from(ptr: Pointer) -> Self { ptr.ptr diff --git a/test-utils/Cargo.toml b/test-utils/Cargo.toml index 0a552973c9aeea3fed1ec6fbb1e6a4075268fe3d..d3477b94a6f95e608b6df5c75ca03845af386825 100644 --- a/test-utils/Cargo.toml +++ b/test-utils/Cargo.toml @@ -1,6 +1,8 @@ [package] name = "substrate-test-utils" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" diff --git a/test-utils/client/Cargo.toml b/test-utils/client/Cargo.toml index 2107f3bdd8b4c81a6f03c61f0b700176390c78db..77a614e5fecca7ed6686da5e846dfe975ab4e64b 100644 --- a/test-utils/client/Cargo.toml +++ b/test-utils/client/Cargo.toml @@ -1,21 +1,24 @@ [package] name = "substrate-test-client" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +publish = false [dependencies] -sc-client-api = { version = "2.0.0", path = "../../client/api" } -sc-client = { version = "0.8", path = "../../client/" } -sc-client-db = { version = "0.8", features = ["test-helpers"], path = "../../client/db" } -sp-consensus = { version = "0.8", path = "../../primitives/consensus/common" } -sc-executor = { version = "0.8", path = "../../client/executor" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../client/api" } +sc-client = { version = "0.8.0-alpha.2", path = "../../client/" } +sc-client-db = { version = "0.8.0-alpha.2", features = ["test-helpers"], path = "../../client/db" } +sp-consensus = { version = "0.8.0-alpha.2", path = "../../primitives/consensus/common" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../client/executor" } futures = "0.3.1" hash-db = "0.15.2" -sp-keyring = { version = "2.0.0", path = "../../primitives/keyring" } -codec = { package = "parity-scale-codec", version = "1.0.0" } -sp-core = { version = "2.0.0", path = "../../primitives/core" } -sp-runtime = { version = "2.0.0", path = "../../primitives/runtime" } -sp-blockchain = { version = "2.0.0", path = "../../primitives/blockchain" } -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } +sp-keyring = { version = "2.0.0-alpha.2", path = "../../primitives/keyring" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sp-core = { version = "2.0.0-alpha.2", path = "../../primitives/core" } +sp-runtime = { version = "2.0.0-alpha.2", path = 
"../../primitives/runtime" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../primitives/blockchain" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index afe11903d5be969471ea39a4e6eda477b2d35e89..37fa1e2b34e522267a6cff8b6740050dece7e2d8 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -33,7 +33,7 @@ pub use sp_keyring::{ ed25519::Keyring as Ed25519Keyring, sr25519::Keyring as Sr25519Keyring, }; -pub use sp_core::{Blake2Hasher, traits::BareCryptoStorePtr}; +pub use sp_core::traits::BareCryptoStorePtr; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; pub use self::client_ext::{ClientExt, ClientBlockImportExt}; @@ -41,13 +41,13 @@ pub use self::client_ext::{ClientExt, ClientBlockImportExt}; use std::sync::Arc; use std::collections::HashMap; use sp_core::storage::{well_known_keys, ChildInfo}; -use sp_runtime::traits::Block as BlockT; +use sp_runtime::traits::{Block as BlockT, BlakeTwo256}; use sc_client::LocalCallExecutor; /// Test client light database backend. pub type LightBackend = sc_client::light::backend::Backend< sc_client_db::light::LightStorage, - Blake2Hasher, + BlakeTwo256, >; /// A genesis storage initialization trait. @@ -210,7 +210,8 @@ impl TestClientBuilder"] edition = "2018" build = "build.rs" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +publish = false [dependencies] -sp-application-crypto = { version = "2.0.0", default-features = false, path = "../../primitives/application-crypto" } -sp-consensus-aura = { version = "0.8", default-features = false, path = "../../primitives/consensus/aura" } -sp-consensus-babe = { version = "0.8", default-features = false, path = "../../primitives/consensus/babe" } -sp-block-builder = { version = "2.0.0", default-features = false, path = "../../primitives/block-builder" } +sp-application-crypto = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/application-crypto" } +sp-consensus-aura = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/consensus/aura" } +sp-consensus-babe = { version = "0.8.0-alpha.2", default-features = false, path = "../../primitives/consensus/babe" } +sp-block-builder = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/block-builder" } cfg-if = "0.1.10" -codec = { package = "parity-scale-codec", version = "1.0.0", default-features = false, features = ["derive"] } -frame-executive = { version = "2.0.0", default-features = false, path = "../../frame/executive" } -sp-inherents = { version = "2.0.0", default-features = false, path = "../../primitives/inherents" } -sp-keyring = { version = "2.0.0", optional = true, path = "../../primitives/keyring" } +codec = { package = "parity-scale-codec", version = "1.2.0", default-features = false, features = ["derive"] } +frame-executive = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/executive" } +sp-inherents = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/inherents" } +sp-keyring = { version = "2.0.0-alpha.2", optional = true, path = "../../primitives/keyring" } log = { version = "0.4.8", optional = true } memory-db = { version = "0.19.0", default-features = false } -sp-offchain = { path = "../../primitives/offchain", default-features = false} -sp-core = { version = 
"2.0.0", default-features = false, path = "../../primitives/core" } -sp-std = { version = "2.0.0", default-features = false, path = "../../primitives/std" } -sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false} -sp-io = { version = "2.0.0", default-features = false, path = "../../primitives/io" } -frame-support = { version = "2.0.0", default-features = false, path = "../../frame/support" } -sp-version = { version = "2.0.0", default-features = false, path = "../../primitives/version" } +sp-offchain = { path = "../../primitives/offchain", default-features = false, version = "2.0.0-alpha.2"} +sp-core = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/core" } +sp-std = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/std" } +sp-runtime-interface = { path = "../../primitives/runtime-interface", default-features = false, version = "2.0.0-alpha.2"} +sp-io = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/io" } +frame-support = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/support" } +sp-version = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/version" } serde = { version = "1.0.101", optional = true, features = ["derive"] } -sp-session = { version = "2.0.0", default-features = false, path = "../../primitives/session" } -sp-api = { version = "2.0.0", default-features = false, path = "../../primitives/api" } -sp-runtime = { version = "2.0.0", default-features = false, path = "../../primitives/runtime" } -pallet-babe = { version = "2.0.0", default-features = false, path = "../../frame/babe" } -frame-system = { version = "2.0.0", default-features = false, path = "../../frame/system" } -frame-system-rpc-runtime-api = { version = "2.0.0", default-features = false, path = "../../frame/system/rpc/runtime-api" } -pallet-timestamp = { version = "2.0.0", default-features = false, path = "../../frame/timestamp" } -sc-client = { version = "0.8", optional = true, path = "../../client" } -sp-trie = { version = "2.0.0", default-features = false, path = "../../primitives/trie" } -sp-transaction-pool = { version = "2.0.0", default-features = false, path = "../../primitives/transaction-pool" } +sp-session = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/session" } +sp-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/api" } +sp-runtime = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/runtime" } +pallet-babe = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/babe" } +frame-system = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/system" } +frame-system-rpc-runtime-api = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/system/rpc/runtime-api" } +pallet-timestamp = { version = "2.0.0-alpha.2", default-features = false, path = "../../frame/timestamp" } +sc-client = { version = "0.8.0-alpha.2", optional = true, path = "../../client" } +sp-trie = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/trie" } +sp-transaction-pool = { version = "2.0.0-alpha.2", default-features = false, path = "../../primitives/transaction-pool" } trie-db = { version = "0.20.0", default-features = false } parity-util-mem = { version = "0.5.1", default-features = false, features = ["primitive-types"] } [dev-dependencies] -sc-executor = { 
version = "0.8", path = "../../client/executor" } -substrate-test-runtime-client = { version = "2.0.0", path = "./client" } -sp-state-machine = { version = "0.8", path = "../../primitives/state-machine" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../client/executor" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "./client" } +sp-state-machine = { version = "0.8.0-alpha.2", path = "../../primitives/state-machine" } [build-dependencies] -wasm-builder-runner = { version = "1.0.4", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } +wasm-builder-runner = { version = "1.0.5", package = "substrate-wasm-builder-runner", path = "../../utils/wasm-builder-runner" } [features] default = [ diff --git a/test-utils/runtime/client/Cargo.toml b/test-utils/runtime/client/Cargo.toml index 643cde16b3090ea44154448e50012414ac38264b..5f16e77860ba68083ce8c4e1ff6088cb78d156fc 100644 --- a/test-utils/runtime/client/Cargo.toml +++ b/test-utils/runtime/client/Cargo.toml @@ -1,19 +1,22 @@ [package] name = "substrate-test-runtime-client" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +publish = false [dependencies] -sc-block-builder = { version = "0.8", path = "../../../client/block-builder" } -substrate-test-client = { version = "2.0.0", path = "../../client" } -sp-core = { version = "2.0.0", path = "../../../primitives/core" } -substrate-test-runtime = { version = "2.0.0", path = "../../runtime" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../primitives/api" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -codec = { package = "parity-scale-codec", version = "1.0.0" } -sc-client-api = { version = "2.0.0", path = "../../../client/api" } -sc-client = { version = "0.8", path = "../../../client/" } +sc-block-builder = { version = "0.8.0-alpha.2", path = "../../../client/block-builder" } +substrate-test-client = { version = "2.0.0-dev", path = "../../client" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../primitives/core" } +substrate-test-runtime = { version = "2.0.0-dev", path = "../../runtime" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../primitives/api" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sc-client-api = { version = "2.0.0-alpha.2", path = "../../../client/api" } +sc-client = { version = "0.8.0-alpha.2", path = "../../../client/" } futures = "0.3.1" diff --git a/test-utils/runtime/client/src/block_builder_ext.rs b/test-utils/runtime/client/src/block_builder_ext.rs index 6b9a6f79ab191f6300f53594d074a0f75bc18d6b..3a9f54d06cb2b4bdcb22d2029abb08343bce5ec6 100644 --- a/test-utils/runtime/client/src/block_builder_ext.rs +++ b/test-utils/runtime/client/src/block_builder_ext.rs @@ -19,7 +19,7 @@ use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::ChangesTrieConfiguration; use sc_client_api::backend; -use sp_runtime::traits::HasherFor; +use sp_runtime::traits::HashFor; use sc_block_builder::BlockBuilderApi; @@ -50,7 +50,7 @@ impl<'a, A, B> BlockBuilderExt for sc_block_builder::BlockBuilder<'a, substrate_ B: backend::Backend, // Rust bug: 
https://github.com/rust-lang/rust/issues/24159 backend::StateBackendFor: - sp_api::StateBackend>, + sp_api::StateBackend>, { fn push_transfer(&mut self, transfer: substrate_test_runtime::Transfer) -> Result<(), sp_blockchain::Error> { self.push(transfer.into_signed_tx()) diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index 21cf94dfa673ade329c1615fd5df2c4e9b7519b6..9a5089302471b792cbef658ef9ed0630d26e8067 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -33,7 +33,7 @@ pub use self::block_builder_ext::BlockBuilderExt; use sp_core::{sr25519, ChangesTrieConfiguration}; use sp_core::storage::{ChildInfo, Storage, StorageChild}; use substrate_test_runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor, HasherFor}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor, HashFor}; use sc_client::{ light::fetcher::{ Fetcher, @@ -82,7 +82,7 @@ pub type LightExecutor = sc_client::light::call_executor::GenesisCallExecutor< sc_client::LocalCallExecutor< sc_client::light::backend::Backend< sc_client_db::light::LightStorage, - HasherFor + HashFor >, NativeExecutor > @@ -243,7 +243,7 @@ impl TestClientBuilderExt for TestClientBuilder< B: sc_client_api::backend::Backend + 'static, // Rust bug: https://github.com/rust-lang/rust/issues/24159 >::State: - sp_api::StateBackend>, + sp_api::StateBackend>, { fn genesis_init_mut(&mut self) -> &mut GenesisParameters { Self::genesis_init_mut(self) diff --git a/test-utils/runtime/client/src/trait_tests.rs b/test-utils/runtime/client/src/trait_tests.rs index 05db43298fd49b933e99d547784f55fcde718443..4af8aa37b640a27cd41f7859837a3808525b0327 100644 --- a/test-utils/runtime/client/src/trait_tests.rs +++ b/test-utils/runtime/client/src/trait_tests.rs @@ -29,14 +29,15 @@ use sc_client_api::blockchain::{Backend as BlockChainBackendT, HeaderBackend}; use substrate_test_client::sp_consensus::BlockOrigin; use substrate_test_runtime::{self, Transfer}; use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, HasherFor}; +use sp_runtime::traits::{Block as BlockT, HashFor}; +use sc_block_builder::BlockBuilderProvider; /// helper to test the `leaves` implementation for various backends pub fn test_leaves_for_backend(backend: Arc) where B: backend::Backend, // Rust bug: https://github.com/rust-lang/rust/issues/24159 backend::StateBackendFor: - sp_api::StateBackend>, + sp_api::StateBackend>, { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 @@ -205,7 +206,7 @@ pub fn test_children_for_backend(backend: Arc) where B: backend::LocalBackend, // Rust bug: https://github.com/rust-lang/rust/issues/24159 >::State: - sp_api::StateBackend>, + sp_api::StateBackend>, { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 @@ -335,7 +336,7 @@ pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc, // Rust bug: https://github.com/rust-lang/rust/issues/24159 >::State: - sp_api::StateBackend>, + sp_api::StateBackend>, { // block tree: // G -> A1 -> A2 -> A3 -> A4 -> A5 diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 138e79cdd5c591c8a6c2535de1192c86e27afb7b..5ddeff390107a35dcdcf6bf3103ecb02275c71ca 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -25,7 +25,7 @@ pub mod system; use sp_std::{prelude::*, marker::PhantomData}; use codec::{Encode, Decode, Input, Error}; -use 
sp_core::{Blake2Hasher, OpaqueMetadata, RuntimeDebug, ChangesTrieConfiguration}; +use sp_core::{OpaqueMetadata, RuntimeDebug, ChangesTrieConfiguration}; use sp_application_crypto::{ed25519, sr25519, RuntimeAppPublic}; use trie_db::{TrieMut, Trie}; use sp_trie::PrefixedMemoryDB; @@ -101,7 +101,25 @@ impl Transfer { pub fn into_signed_tx(self) -> Extrinsic { let signature = sp_keyring::AccountKeyring::from_public(&self.from) .expect("Creates keyring from public key.").sign(&self.encode()).into(); - Extrinsic::Transfer(self, signature) + Extrinsic::Transfer { + transfer: self, + signature, + exhaust_resources_when_not_first: false, + } + } + + /// Convert into a signed extrinsic, which will only end up included in the block + /// if it's the first transaction. Otherwise it will cause `ResourceExhaustion` error + /// which should be considered as block being full. + #[cfg(feature = "std")] + pub fn into_resources_exhausting_tx(self) -> Extrinsic { + let signature = sp_keyring::AccountKeyring::from_public(&self.from) + .expect("Creates keyring from public key.").sign(&self.encode()).into(); + Extrinsic::Transfer { + transfer: self, + signature, + exhaust_resources_when_not_first: true, + } } } @@ -109,7 +127,11 @@ impl Transfer { #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub enum Extrinsic { AuthoritiesChange(Vec), - Transfer(Transfer, AccountSignature), + Transfer { + transfer: Transfer, + signature: AccountSignature, + exhaust_resources_when_not_first: bool, + }, IncludeData(Vec), StorageChange(Vec, Option>), ChangesTrieConfigUpdate(Option), @@ -130,9 +152,9 @@ impl BlindCheckable for Extrinsic { fn check(self, _signature: CheckSignature) -> Result { match self { Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), - Extrinsic::Transfer(transfer, signature) => { + Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => { if sp_runtime::verify_encoded_lazy(&signature, &transfer, &transfer.from) { - Ok(Extrinsic::Transfer(transfer, signature)) + Ok(Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first }) } else { Err(InvalidTransaction::BadProof.into()) } @@ -165,7 +187,7 @@ impl ExtrinsicT for Extrinsic { impl Extrinsic { pub fn transfer(&self) -> &Transfer { match self { - Extrinsic::Transfer(ref transfer, _) => transfer, + Extrinsic::Transfer { ref transfer, .. 
} => transfer, _ => panic!("cannot convert to transfer ref"), } } @@ -374,7 +396,7 @@ impl frame_system::Trait for Runtime { type ModuleToIndex = (); type AccountData = (); type OnNewAccount = (); - type OnReapAccount = (); + type OnKilledAccount = (); } impl pallet_timestamp::Trait for Runtime { @@ -419,7 +441,7 @@ fn code_using_trie() -> u64 { let mut root = sp_std::default::Default::default(); let _ = { let v = &pairs; - let mut t = TrieDBMut::::new(&mut mdb, &mut root); + let mut t = TrieDBMut::::new(&mut mdb, &mut root); for i in 0..v.len() { let key: &[u8]= &v[i].0; let val: &[u8] = &v[i].1; @@ -430,7 +452,7 @@ fn code_using_trie() -> u64 { t }; - if let Ok(trie) = TrieDB::::new(&mdb, &root) { + if let Ok(trie) = TrieDB::::new(&mdb, &root) { if let Ok(iter) = trie.iter() { let mut iter_pairs = Vec::new(); for pair in iter { diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index b410d317a1babaeb52443bb7de7e3bfaf01586c0..4ce774598f3b0f44a2fc87dcca712d5ea85b9087 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -192,7 +192,7 @@ pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { /// This doesn't attempt to validate anything regarding the block. pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); - let result = execute_transaction_backend(&utx); + let result = execute_transaction_backend(&utx, extrinsic_index); ExtrinsicData::insert(extrinsic_index, utx.encode()); storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); result @@ -237,7 +237,7 @@ pub fn finalize_block() -> Header { extrinsics_root, state_root: storage_root, parent_hash, - digest: digest, + digest, } } @@ -247,13 +247,18 @@ fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { utx.clone().check(CheckSignature::Yes).map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) } -fn execute_transaction_backend(utx: &Extrinsic) -> ApplyExtrinsicResult { +fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { check_signature(utx)?; match utx { - Extrinsic::Transfer(ref transfer, _) => execute_transfer_backend(transfer), - Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), + Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => + Err(InvalidTransaction::ExhaustsResources.into()), + Extrinsic::Transfer { ref transfer, .. 
} => + execute_transfer_backend(transfer), + Extrinsic::AuthoritiesChange(ref new_auth) => + execute_new_authorities_backend(new_auth), Extrinsic::IncludeData(_) => Ok(Ok(())), - Extrinsic::StorageChange(key, value) => execute_storage_change(key, value.as_ref().map(|v| &**v)), + Extrinsic::StorageChange(key, value) => + execute_storage_change(key, value.as_ref().map(|v| &**v)), Extrinsic::ChangesTrieConfigUpdate(ref new_config) => execute_changes_trie_config_update(new_config.clone()), } @@ -401,7 +406,7 @@ mod tests { fn block_import_works_wasm() { block_import_works(|b, ext| { let mut ext = ext.ext(); - executor().call::<_, NeverNativeValue, fn() -> _>( + executor().call:: _>( &mut ext, "Core_execute_block", &b.encode(), @@ -494,7 +499,7 @@ mod tests { fn block_import_with_transaction_works_wasm() { block_import_with_transaction_works(|b, ext| { let mut ext = ext.ext(); - executor().call::<_, NeverNativeValue, fn() -> _>( + executor().call:: _>( &mut ext, "Core_execute_block", &b.encode(), diff --git a/test-utils/runtime/transaction-pool/Cargo.toml b/test-utils/runtime/transaction-pool/Cargo.toml index 72e81f5f19afa891e5e805b5057be41ac5cf05ee..3e22da468f1e39d914243d8aa3cdf44fdd98a658 100644 --- a/test-utils/runtime/transaction-pool/Cargo.toml +++ b/test-utils/runtime/transaction-pool/Cargo.toml @@ -1,17 +1,20 @@ [package] name = "substrate-test-runtime-transaction-pool" -version = "2.0.0" +version = "2.0.0-dev" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +publish = false [dependencies] -substrate-test-runtime-client = { version = "2.0.0", path = "../client" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../client" } parking_lot = "0.10.0" -codec = { package = "parity-scale-codec", version = "1.0.0" } -sp-blockchain = { version = "2.0.0", path = "../../../primitives/blockchain" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } -sp-transaction-pool = { version = "2.0.0", path = "../../../primitives/transaction-pool" } -sc-transaction-graph = { version = "2.0.0", path = "../../../client/transaction-pool/graph" } +codec = { package = "parity-scale-codec", version = "1.2.0" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../primitives/blockchain" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../primitives/transaction-pool" } +sc-transaction-graph = { version = "2.0.0-alpha.2", path = "../../../client/transaction-pool/graph" } futures = { version = "0.3.1", features = ["compat"] } derive_more = "0.99.2" diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index aedc7dc4c37577cd32e345d8c37d30c36742559d..8cd4e58954b73c931f89b7178fee92896ce21c5e 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -265,7 +265,7 @@ pub fn uxt(who: AccountKeyring, nonce: Index) -> Extrinsic { nonce, amount: 1, }; - let signature = transfer.using_encoded(|e| who.sign(e)); - Extrinsic::Transfer(transfer, signature.into()) + let signature = transfer.using_encoded(|e| who.sign(e)).into(); + Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first: false } } diff --git a/utils/browser/Cargo.toml b/utils/browser/Cargo.toml index 
7e101c438a881a5df54fbb73f10b8b632640708d..e06794c6ccbf31b22a02ef56acdeb6bf59eb036c 100644 --- a/utils/browser/Cargo.toml +++ b/utils/browser/Cargo.toml @@ -1,26 +1,28 @@ [package] -name = "browser-utils" -version = "0.8.0" +name = "substrate-browser-utils" +version = "0.8.0-alpha.3" authors = ["Parity Technologies "] description = "Utilities for creating a browser light-client." edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] futures = "0.3" futures01 = { package = "futures", version = "0.1.29" } log = "0.4.8" -libp2p = { version = "0.16.0", default-features = false } +libp2p = { version = "0.16.2", default-features = false } console_error_panic_hook = "0.1.6" console_log = "0.1.2" js-sys = "0.3.34" wasm-bindgen = "0.2.57" wasm-bindgen-futures = "0.4.7" kvdb-web = "0.4" -sc-informant = { version = "0.8", path = "../../client/informant" } -sc-service = { version = "0.8", path = "../../client/service", default-features = false } -sc-network = { path = "../../client/network" } -sc-chain-spec = { path = "../../client/chain-spec" } +sc-informant = { version = "0.8.0-alpha.2", path = "../../client/informant" } +sc-service = { version = "0.8.0-alpha.2", path = "../../client/service", default-features = false } +sc-network = { path = "../../client/network" , version = "0.8.0-alpha.2"} +sc-chain-spec = { path = "../../client/chain-spec" , version = "2.0.0-alpha.2"} # Imported just for the `no_cc` feature clear_on_drop = { version = "0.2.3", features = ["no_cc"] } diff --git a/utils/build-script-utils/Cargo.toml b/utils/build-script-utils/Cargo.toml index 8c94edd527be18057894f3d30bea934dd82b12d5..5903d107fbbe138b123e4c27aafe3b7dfecbbadb 100644 --- a/utils/build-script-utils/Cargo.toml +++ b/utils/build-script-utils/Cargo.toml @@ -1,8 +1,11 @@ [package] name = "substrate-build-script-utils" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Crate with utility functions for `build.rs` scripts." [dependencies] diff --git a/utils/fork-tree/Cargo.toml b/utils/fork-tree/Cargo.toml index 0ac0fb65220106d98e9cb085e5a3f27e6618e565..8ade20cd7b7e29c44900be94ae8fe06bb0f8cf49 100644 --- a/utils/fork-tree/Cargo.toml +++ b/utils/fork-tree/Cargo.toml @@ -1,9 +1,13 @@ [package] name = "fork-tree" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Utility library for managing tree-like ordered data with logic for pruning the tree while finalizing nodes." 
+documentation = "https://docs.rs/fork-tree" [dependencies] -codec = { package = "parity-scale-codec", version = "1.0.0", features = ["derive"] } +codec = { package = "parity-scale-codec", version = "1.2.0", features = ["derive"] } diff --git a/utils/frame/benchmarking-cli/Cargo.toml b/utils/frame/benchmarking-cli/Cargo.toml index 708ad1dd7e912fae749f092e2fbabf3878e5fe5a..cdaf8ae89195c2ad540174386ef2b14ab2246084 100644 --- a/utils/frame/benchmarking-cli/Cargo.toml +++ b/utils/frame/benchmarking-cli/Cargo.toml @@ -1,18 +1,21 @@ [package] name = "frame-benchmarking-cli" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "CLI for benchmarking FRAME" [dependencies] linregress = "0.1" +frame-benchmarking = { version = "2.0.0-alpha.2", path = "../../../frame/benchmarking" } +sc-service = { version = "0.8.0-alpha.2", path = "../../../client/service" } +sc-cli = { version = "0.8.0-alpha.2", path = "../../../client/cli" } +sc-client = { version = "0.8.0-alpha.2", path = "../../../client" } +sc-client-db = { version = "0.8.0-alpha.2", path = "../../../client/db" } +sc-executor = { version = "0.8.0-alpha.2", path = "../../../client/executor" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../primitives/runtime" } structopt = "0.3.8" -codec = { version = "1.1.2", package = "parity-scale-codec" } -frame-benchmarking = { version = "2.0.0", path = "../../../frame/benchmarking" } -sc-service = { version = "0.8.0", path = "../../../client/service" } -sc-cli = { version = "0.8.0", path = "../../../client/cli" } -sc-client = { version = "0.8.0", path = "../../../client" } -sc-client-db = { version = "0.8.0", path = "../../../client/db" } -sc-executor = { version = "0.8.0", path = "../../../client/executor" } -sp-runtime = { version = "2.0.0", path = "../../../primitives/runtime" } +codec = { version = "1.2.0", package = "parity-scale-codec" } diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 0d781015071c3faa374e9a32ccbe76805a7ed85d..a1aed03e08a74512af7447b746cc2aa5024e90bf 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -16,9 +16,9 @@ use sp_runtime::{BuildStorage, traits::{Block as BlockT, Header as HeaderT, NumberFor}}; use sc_client::StateMachine; -use sc_cli::{ExecutionStrategy, WasmExecutionMethod}; +use sc_cli::{ExecutionStrategy, WasmExecutionMethod, VersionInfo}; use sc_client_db::BenchmarkingState; -use sc_service::{RuntimeGenesis, ChainSpecExtension}; +use sc_service::{RuntimeGenesis, ChainSpecExtension, Configuration, ChainSpec}; use sc_executor::{NativeExecutor, NativeExecutionDispatch}; use std::fmt::Debug; use codec::{Encode, Decode}; @@ -40,8 +40,16 @@ pub struct BenchmarkCmd { pub extrinsic: String, /// Select how many samples we should take across the variable components. - #[structopt(short, long, default_value = "1")] - pub steps: u32, + #[structopt(short, long, use_delimiter = true)] + pub steps: Vec, + + /// Indicates lowest values for each of the component ranges. + #[structopt(long, use_delimiter = true)] + pub lowest_range_values: Vec, + + /// Indicates highest values for each of the component ranges. + #[structopt(long, use_delimiter = true)] + pub highest_range_values: Vec, /// Select how many repetitions of this benchmark should run. 
#[structopt(short, long, default_value = "1")] @@ -72,26 +80,16 @@ pub struct BenchmarkCmd { } impl BenchmarkCmd { - /// Parse CLI arguments and initialize given config. - pub fn init( - &self, - config: &mut sc_service::config::Configuration, - spec_factory: impl FnOnce(&str) -> Result>, String>, - version: &sc_cli::VersionInfo, - ) -> sc_cli::error::Result<()> where - G: sc_service::RuntimeGenesis, - E: sc_service::ChainSpecExtension, - { - sc_cli::init_config(config, &self.shared_params, version, spec_factory)?; - // make sure to configure keystore - sc_cli::fill_config_keystore_in_memory(config).map_err(Into::into) + /// Initialize + pub fn init(&self, version: &sc_cli::VersionInfo) -> sc_cli::Result<()> { + self.shared_params.init(version) } /// Runs the command and benchmarks the chain. pub fn run( self, - config: sc_service::Configuration, - ) -> sc_cli::error::Result<()> + config: Configuration, + ) -> sc_cli::Result<()> where G: RuntimeGenesis, E: ChainSpecExtension, @@ -111,52 +109,81 @@ impl BenchmarkCmd { wasm_method, None, // heap pages ); + let result = StateMachine::<_, _, NumberFor, _>::new( &state, None, &mut changes, &executor, "Benchmark_dispatch_benchmark", - &(&self.pallet, &self.extrinsic, self.steps, self.repeat).encode(), + &( + &self.pallet, + &self.extrinsic, + self.lowest_range_values.clone(), + self.highest_range_values.clone(), + self.steps.clone(), + self.repeat, + ).encode(), Default::default(), ) - .execute(strategy.into()) - .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; - let results = > as Decode>::decode(&mut &result[..]) - .unwrap_or(None); - - if let Some(results) = results { - // Print benchmark metadata - println!( - "Pallet: {:?}, Extrinsic: {:?}, Steps: {:?}, Repeat: {:?}", - self.pallet, - self.extrinsic, - self.steps, - self.repeat, - ); - - println!("Data:"); - - // Print the table header - results[0].0.iter().for_each(|param| print!("{:?},", param.0)); + .execute(strategy.into()) + .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; + + let results = , String> as Decode>::decode(&mut &result[..]) + .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?; + + match results { + Ok(results) => { + // Print benchmark metadata + println!( + "Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}", + self.pallet, + self.extrinsic, + self.lowest_range_values, + self.highest_range_values, + self.steps, + self.repeat, + ); + + // Print the table header + results[0].0.iter().for_each(|param| print!("{:?},", param.0)); + + print!("extrinsic_time,storage_root_time\n"); + // Print the values + results.iter().for_each(|result| { + let parameters = &result.0; + parameters.iter().for_each(|param| print!("{:?},", param.1)); + // Print extrinsic time and storage root time + print!("{:?},{:?}\n", result.1, result.2); + }); + + // Conduct analysis. + if let Some(analysis) = Analysis::median_slopes(&results) { + println!("Analysis\n========\n{}", analysis); + } + + eprintln!("Done."); + } + Err(error) => eprintln!("Error: {:?}", error), + } - print!("time\n"); - // Print the values - results.iter().for_each(|result| { - let parameters = &result.0; - parameters.iter().for_each(|param| print!("{:?},", param.1)); - print!("{:?}\n", result.1); - }); + Ok(()) + } - // Conduct analysis. 
- if let Some(analysis) = Analysis::median_slopes(&results) { - println!("Analysis\n========\n{}", analysis); - } + /// Update and prepare a `Configuration` with command line parameters + pub fn update_config( + &self, + mut config: &mut Configuration, + spec_factory: impl FnOnce(&str) -> Result>, String>, + version: &VersionInfo, + ) -> sc_cli::Result<()> where + G: RuntimeGenesis, + E: ChainSpecExtension, + { + self.shared_params.update_config(&mut config, spec_factory, version)?; - eprintln!("Done."); - } else { - eprintln!("No Results."); - } + // make sure to configure keystore + config.use_in_memory_keystore()?; Ok(()) } diff --git a/utils/frame/rpc/support/Cargo.toml b/utils/frame/rpc/support/Cargo.toml index 3b4339496a99192ad5e34d672680e375634c86f3..162d25cf9bfcddebb1691d6ff54db1daac76a3fc 100644 --- a/utils/frame/rpc/support/Cargo.toml +++ b/utils/frame/rpc/support/Cargo.toml @@ -1,9 +1,12 @@ [package] name = "substrate-frame-rpc-support" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies ", "Andrew Dirksen "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "Substrate RPC for FRAME's support" [dependencies] futures = { version = "0.3.0", features = ["compat"] } @@ -11,10 +14,10 @@ jsonrpc-client-transports = "14" jsonrpc-core = "14" codec = { package = "parity-scale-codec", version = "1" } serde = "1" -frame-support = { version = "2.0.0", path = "../../../../frame/support" } -sp-storage = { version = "2.0.0", path = "../../../../primitives/storage" } -sc-rpc-api = { version = "0.8", path = "../../../../client/rpc-api" } +frame-support = { version = "2.0.0-alpha.2", path = "../../../../frame/support" } +sp-storage = { version = "2.0.0-alpha.2", path = "../../../../primitives/storage" } +sc-rpc-api = { version = "0.8.0-alpha.2", path = "../../../../client/rpc-api" } [dev-dependencies] -frame-system = { version = "2.0.0", path = "../../../../frame/system" } -tokio = "0.1" +frame-system = { version = "2.0.0-alpha.2", path = "../../../../frame/system" } +tokio = "0.2" diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index a9982945e7f73eefbc0977b0bbc228e8a56ada69..42c10fb2cc2e843afd544c5b50aea0aabb6aa367 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -33,9 +33,7 @@ use sc_rpc_api::state::StateClient; /// A typed query on chain state usable from an RPC client. 
/// /// ```no_run -/// # use futures::compat::Compat; /// # use futures::compat::Future01CompatExt; -/// # use futures::future::FutureExt; /// # use jsonrpc_client_transports::RpcError; /// # use jsonrpc_client_transports::transports::http; /// # use codec::Encode; @@ -49,7 +47,7 @@ use sc_rpc_api::state::StateClient; /// # type Hash = (); /// # /// # fn main() -> Result<(), RpcError> { -/// # tokio::runtime::Runtime::new().unwrap().block_on(Compat::new(test().boxed())) +/// # tokio::runtime::Runtime::new().unwrap().block_on(test()) /// # } /// # /// # struct TestRuntime; diff --git a/utils/frame/rpc/system/Cargo.toml b/utils/frame/rpc/system/Cargo.toml index 818794f8469fc23e822f99ea6b9360d58634e415..8b1c62ccd70e2c2290ec29a56156d3d61aac9474 100644 --- a/utils/frame/rpc/system/Cargo.toml +++ b/utils/frame/rpc/system/Cargo.toml @@ -1,27 +1,30 @@ [package] name = "substrate-frame-rpc-system" -version = "2.0.0" +version = "2.0.0-alpha.3" authors = ["Parity Technologies "] edition = "2018" license = "GPL-3.0" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" +description = "FRAME's system exposed over Substrate RPC" [dependencies] -sc-client = { version = "0.8", path = "../../../../client/" } -codec = { package = "parity-scale-codec", version = "1.0.0" } +sc-client = { version = "0.8.0-alpha.2", path = "../../../../client/" } +codec = { package = "parity-scale-codec", version = "1.2.0" } futures = "0.3.1" jsonrpc-core = "14.0.3" jsonrpc-core-client = "14.0.3" jsonrpc-derive = "14.0.3" log = "0.4.8" serde = { version = "1.0.101", features = ["derive"] } -sp-runtime = { version = "2.0.0", path = "../../../../primitives/runtime" } -sp-api = { version = "2.0.0", path = "../../../../primitives/api" } -frame-system-rpc-runtime-api = { version = "2.0.0", path = "../../../../frame/system/rpc/runtime-api" } -sp-core = { version = "2.0.0", path = "../../../../primitives/core" } -sp-blockchain = { version = "2.0.0", path = "../../../../primitives/blockchain" } -sp-transaction-pool = { version = "2.0.0", path = "../../../../primitives/transaction-pool" } +sp-runtime = { version = "2.0.0-alpha.2", path = "../../../../primitives/runtime" } +sp-api = { version = "2.0.0-alpha.2", path = "../../../../primitives/api" } +frame-system-rpc-runtime-api = { version = "2.0.0-alpha.2", path = "../../../../frame/system/rpc/runtime-api" } +sp-core = { version = "2.0.0-alpha.2", path = "../../../../primitives/core" } +sp-blockchain = { version = "2.0.0-alpha.2", path = "../../../../primitives/blockchain" } +sp-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../../primitives/transaction-pool" } [dev-dependencies] -substrate-test-runtime-client = { version = "2.0.0", path = "../../../../test-utils/runtime/client" } +substrate-test-runtime-client = { version = "2.0.0-dev", path = "../../../../test-utils/runtime/client" } env_logger = "0.7.0" -sc-transaction-pool = { version = "2.0.0", path = "../../../../client/transaction-pool" } +sc-transaction-pool = { version = "2.0.0-alpha.2", path = "../../../../client/transaction-pool" } diff --git a/utils/grafana-data-source/src/database.rs b/utils/grafana-data-source/src/database.rs deleted file mode 100644 index f20917cf785d4a27a8b60a7e57e77eccd6a93658..0000000000000000000000000000000000000000 --- a/utils/grafana-data-source/src/database.rs +++ /dev/null @@ -1,154 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
- -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use std::collections::HashMap; -use std::convert::TryFrom; -use crate::Error; - -pub struct Database { - base_timestamp: i64, - storage: HashMap> -} - -impl Database { - /// Create a new Database. - pub fn new() -> Self { - Self { - base_timestamp: now_millis(), - storage: HashMap::new() - } - } - - /// Produce an iterator for keys starting with a base string. - pub fn keys_starting_with<'a>(&'a self, base: &'a str) -> impl Iterator + 'a { - self.storage.keys() - .filter(move |key| key.starts_with(base)) - .cloned() - } - - /// Select `max_datapoints` datapoints that have been added between `from` and `to`. - pub fn datapoints_between(&self, key: &str, from: i64, to: i64, max_datapoints: usize) -> Option> { - self.storage.get(key) - .map(|vec| { - let from = find_index(vec, self.base_timestamp, from); - let to = find_index(vec, self.base_timestamp, to); - let slice = &vec[from .. to]; - - if max_datapoints == 0 { - Vec::new() - } else if max_datapoints >= slice.len() { - // Just convert the slice as-is - slice.iter() - .map(|dp| dp.make_absolute(self.base_timestamp)) - .collect() - } else { - // We have more datapoints than we need, so we need to skip some - (0 .. max_datapoints - 1) - .map(|i| &slice[i * slice.len() / (max_datapoints - 1)]) - .chain(slice.last()) - .map(|dp| dp.make_absolute(self.base_timestamp)) - .collect() - } - }) - } - - /// Push a new datapoint. Will error if the base timestamp hasn't been updated in `2^32` - /// milliseconds (49 days). - pub fn push(&mut self, key: &str, value: f32) -> Result<(), Error> { - self.storage.entry(key.into()) - .or_insert_with(Vec::new) - .push(Datapoint::new(self.base_timestamp, value)?); - - Ok(()) - } - - /// Set a new base timestamp, and remove metrics older than this new timestamp. Errors if the - /// difference between timestamps is greater than `2^32` milliseconds (49 days). - pub fn truncate(&mut self, new_base_timestamp: i64) -> Result<(), Error> { - // Ensure that the new base is older. - if self.base_timestamp >= new_base_timestamp { - return Ok(()); - } - - // If the old base timestamp was too long ago, the - let delta = u32::try_from(new_base_timestamp - self.base_timestamp) - .map_err(Error::Timestamp)?; - - for metric in self.storage.values_mut() { - // Find the index of the oldest allowed timestamp and cut out all those before it. 
- let index = find_index(&metric, self.base_timestamp, new_base_timestamp); - - *metric = metric.iter_mut() - .skip(index) - .map(|dp| { - dp.delta_timestamp -= delta; - *dp - }) - .collect(); - } - - self.base_timestamp = new_base_timestamp; - - Ok(()) - } -} - -#[derive(Clone, Copy)] -struct Datapoint { - delta_timestamp: u32, - value: f32 -} - -impl Datapoint { - fn new(base_timestamp: i64, value: f32) -> Result { - Ok(Self { - delta_timestamp: u32::try_from(now_millis() - base_timestamp) - .map_err(Error::Timestamp)?, - value - }) - } - - fn make_absolute(self, base_timestamp: i64) -> (f32, i64) { - (self.value, base_timestamp + self.delta_timestamp as i64) - } -} - -fn find_index(slice: &[Datapoint], base_timestamp: i64, timestamp: i64) -> usize { - slice.binary_search_by_key(×tamp, |datapoint| { - base_timestamp + datapoint.delta_timestamp as i64 - }).unwrap_or_else(|index| index) -} - -/// Get the current unix timestamp in milliseconds. -fn now_millis() -> i64 { - chrono::Utc::now().timestamp_millis() -} - -#[test] -fn test() { - let mut database = Database::new(); - - database.push("test", 1.0).unwrap(); - database.push("test", 2.5).unwrap(); - database.push("test", 2.0).unwrap(); - database.push("test 2", 1.0).unwrap(); - - let mut keys: Vec<_> = database.keys_starting_with("test").collect(); - keys.sort(); - - assert_eq!(keys, ["test", "test 2"]); - assert_eq!(database.keys_starting_with("test ").collect::>(), ["test 2"]); -} diff --git a/utils/grafana-data-source/src/lib.rs b/utils/grafana-data-source/src/lib.rs deleted file mode 100644 index bc40fc39bbed6be0478b64ce1e637c112e650619..0000000000000000000000000000000000000000 --- a/utils/grafana-data-source/src/lib.rs +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -//! [Grafana] data source server -//! -//! To display node statistics with [Grafana], this module exposes a `run_server` function that -//! starts up a HTTP server that conforms to the [`grafana-json-data-source`] API. The -//! `record_metrics` macro can be used to pass metrics to this server. -//! -//! [Grafana]: https://grafana.com/ -//! [`grafana-json-data-source`]: https://github.com/simPod/grafana-json-datasource - -#![warn(missing_docs)] - -use lazy_static::lazy_static; -use parking_lot::RwLock; - -mod types; -mod server; -#[cfg(not(target_os = "unknown"))] -mod networking; -mod database; - -use database::Database; -pub use server::run_server; -use std::num::TryFromIntError; - -lazy_static! { - // The `RwLock` wrapping the metrics database. - static ref DATABASE: RwLock = RwLock::new(Database::new()); -} - -/// Write metrics to `METRICS`. -#[macro_export] -macro_rules! 
record_metrics( - ($($key:expr => $value:expr,)*) => { - if cfg!(not(target_os = "unknown")) { - $crate::record_metrics_slice(&[ - $( ($key, $value as f32), )* - ]) - } else { - Ok(()) - } - } -); - -/// Write metrics to `METRICS` as a slice. Intended to be only used via `record_metrics!`. -pub fn record_metrics_slice(metrics: &[(&str, f32)]) -> Result<(), Error> { - let mut database = crate::DATABASE.write(); - - for &(key, value) in metrics.iter() { - database.push(key, value)?; - } - - Ok(()) -} - -/// Error type that can be returned by either `record_metrics` or `run_server`. -#[derive(Debug, derive_more::Display, derive_more::From)] -pub enum Error { - /// Hyper internal error. - #[cfg(not(target_os = "unknown"))] - Hyper(hyper::Error), - /// Http request error. - #[cfg(not(target_os = "unknown"))] - Http(hyper::http::Error), - /// Serialization/deserialization error. - Serde(serde_json::Error), - /// Timestamp error. - Timestamp(TryFromIntError), - /// i/o error. - Io(std::io::Error) -} - -impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - #[cfg(not(target_os = "unknown"))] - Error::Hyper(error) => Some(error), - #[cfg(not(target_os = "unknown"))] - Error::Http(error) => Some(error), - Error::Serde(error) => Some(error), - Error::Timestamp(error) => Some(error), - Error::Io(error) => Some(error) - } - } -} diff --git a/utils/grafana-data-source/src/server.rs b/utils/grafana-data-source/src/server.rs deleted file mode 100644 index f2f06f76888703199ff1e1906558d193c89654da..0000000000000000000000000000000000000000 --- a/utils/grafana-data-source/src/server.rs +++ /dev/null @@ -1,164 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. - -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use serde::{Serialize, de::DeserializeOwned}; -use chrono::{Duration, Utc}; -use futures_util::{FutureExt, TryStreamExt, future::{Future, select, Either}}; -use futures_timer::Delay; -use crate::{DATABASE, Error, types::{Target, Query, TimeseriesData, Range}}; - -#[cfg(not(target_os = "unknown"))] -use hyper::{Body, Request, Response, header, service::{service_fn, make_service_fn}, Server}; - -#[cfg(not(target_os = "unknown"))] -async fn api_response(req: Request) -> Result, Error> { - match req.uri().path() { - "/search" => { - map_request_to_response(req, |target: Target| { - // Filter and return metrics relating to the target - DATABASE.read() - .keys_starting_with(&target.target) - .collect::>() - }).await - }, - "/query" => { - map_request_to_response(req, |query: Query| { - let metrics = DATABASE.read(); - - let Query { - range: Range { from, to }, - max_datapoints, .. 
- } = query; - - // Return timeseries data related to the specified metrics - query.targets.iter() - .map(|target| { - let datapoints = metrics.datapoints_between(&target.target, from, to, max_datapoints) - .unwrap_or_else(Vec::new); - - TimeseriesData { - target: target.target.clone(), datapoints - } - }) - .collect::>() - }).await - }, - _ => Ok(Response::new(Body::empty())), - } -} - -#[cfg(not(target_os = "unknown"))] -async fn map_request_to_response(req: Request, transformation: T) -> Result, Error> - where - Req: DeserializeOwned, - Res: Serialize, - T: Fn(Req) -> Res + Send + Sync + 'static -{ - let body = req.into_body() - .map_ok(|bytes| bytes.to_vec()) - .try_concat() - .await - .map_err(Error::Hyper)?; - - let req = serde_json::from_slice(body.as_ref()).map_err(Error::Serde)?; - let res = transformation(req); - let string = serde_json::to_string(&res).map_err(Error::Serde)?; - - Response::builder() - .header(header::CONTENT_TYPE, "application/json") - .body(Body::from(string)) - .map_err(Error::Http) -} - -/// Given that we're not using hyper's tokio feature, we need to define out own executor. -#[derive(Clone)] -pub struct Executor; - -#[cfg(not(target_os = "unknown"))] -impl hyper::rt::Executor for Executor - where - T: Future + Send + 'static, - T::Output: Send + 'static, -{ - fn execute(&self, future: T) { - async_std::task::spawn(future); - } -} - -/// Start the data source server. -#[cfg(not(target_os = "unknown"))] -pub async fn run_server(mut address: std::net::SocketAddr) -> Result<(), Error> { - use async_std::{net, io}; - use crate::networking::Incoming; - - let listener = loop { - let listener = net::TcpListener::bind(&address).await; - match listener { - Ok(listener) => { - log::info!("Grafana data source server started at {}", address); - break listener - }, - Err(err) => match err.kind() { - io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied if address.port() != 0 => { - log::warn!( - "Unable to bind grafana data source server to {}. Trying random port.", - address - ); - address.set_port(0); - continue; - }, - _ => return Err(err.into()), - } - } - }; - - let service = make_service_fn(|_| { - async { - Ok::<_, Error>(service_fn(api_response)) - } - }); - - let server = Server::builder(Incoming(listener.incoming())) - .executor(Executor) - .serve(service) - .boxed(); - - let every = std::time::Duration::from_secs(24 * 3600); - let clean = clean_up(every, Duration::weeks(1)) - .boxed(); - - let result = match select(server, clean).await { - Either::Left((result, _)) => result.map_err(Into::into), - Either::Right((result, _)) => result - }; - - result -} - -#[cfg(target_os = "unknown")] -pub async fn run_server(_: std::net::SocketAddr) -> Result<(), Error> { - Ok(()) -} - -/// Periodically remove old metrics. -async fn clean_up(every: std::time::Duration, before: Duration) -> Result<(), Error> { - loop { - Delay::new(every).await; - - let oldest_allowed = (Utc::now() - before).timestamp_millis(); - DATABASE.write().truncate(oldest_allowed)?; - } -} diff --git a/utils/grafana-data-source/src/types.rs b/utils/grafana-data-source/src/types.rs deleted file mode 100644 index 960fc787e3eb2ab05d6a8da922823ffdf3e0c2b7..0000000000000000000000000000000000000000 --- a/utils/grafana-data-source/src/types.rs +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2019-2020 Parity Technologies (UK) Ltd. -// This file is part of Substrate. 
- -// Substrate is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. - -// Substrate is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. - -// You should have received a copy of the GNU General Public License -// along with Substrate. If not, see . - -use serde::{Serialize, Deserialize}; - -#[derive(Serialize, Deserialize)] -pub struct Target { - pub target: String, -} - -#[derive(Serialize, Deserialize)] -pub struct Query { - #[serde(rename = "maxDataPoints")] - pub max_datapoints: usize, - pub targets: Vec, - pub range: Range, -} - -#[derive(Serialize, Deserialize)] -pub struct Range { - #[serde(deserialize_with = "date_to_timestamp_ms")] - pub from: i64, - #[serde(deserialize_with = "date_to_timestamp_ms")] - pub to: i64, -} - -// Deserialize a timestamp via a `DateTime` -fn date_to_timestamp_ms<'de, D: serde::Deserializer<'de>>(timestamp: D) -> Result { - Deserialize::deserialize(timestamp) - .map(|date: chrono::DateTime| date.timestamp_millis()) -} - -#[derive(Serialize, Deserialize)] -pub struct TimeseriesData { - pub target: String, - pub datapoints: Vec<(f32, i64)> -} diff --git a/utils/grafana-data-source/test/Cargo.toml b/utils/grafana-data-source/test/Cargo.toml deleted file mode 100644 index 18c080c8d1f71468e6baaa328300f4d80f2d5f1f..0000000000000000000000000000000000000000 --- a/utils/grafana-data-source/test/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -description = "Grafana data source server test" -name = "grafana-data-source-test" -version = "2.0.0" -license = "GPL-3.0" -authors = ["Parity Technologies "] -edition = "2018" - -[dependencies] -grafana-data-source = { version = "0.8", path = ".." 
} -futures = "0.3" -futures-timer = "3.0.1" -rand = "0.7" diff --git a/utils/grafana-data-source/Cargo.toml b/utils/prometheus/Cargo.toml similarity index 61% rename from utils/grafana-data-source/Cargo.toml rename to utils/prometheus/Cargo.toml index c49ee963f0c87165a24e28b5d9a85bbbca0f3807..bc7750a72083fa59249db011549c8d63b777db59 100644 --- a/utils/grafana-data-source/Cargo.toml +++ b/utils/prometheus/Cargo.toml @@ -1,20 +1,17 @@ [package] -description = "Grafana data source server" -name = "grafana-data-source" -version = "0.8.0" +description = "Endpoint to expose Prometheus metrics" +name = "substrate-prometheus-endpoint" +version = "0.8.0-alpha.3" license = "GPL-3.0" authors = ["Parity Technologies "] edition = "2018" +homepage = "https://substrate.dev" +repository = "https://github.com/paritytech/substrate/" [dependencies] log = "0.4.8" +prometheus = "0.7" futures-util = { version = "0.3.1", default-features = false, features = ["io"] } -serde_json = "1" -serde = { version = "1", features = ["derive"] } -chrono = { version = "0.4", features = ["serde"] } -lazy_static = "1.4" -parking_lot = "0.10.0" -futures-timer = "3.0.1" derive_more = "0.99" [target.'cfg(not(target_os = "unknown"))'.dependencies] diff --git a/utils/prometheus/README.md b/utils/prometheus/README.md new file mode 100644 index 0000000000000000000000000000000000000000..9dd0882105c698e382c8d35b31244ca06e3127e1 --- /dev/null +++ b/utils/prometheus/README.md @@ -0,0 +1,16 @@ +# Substrate Prometheus Exporter + +## Introduction + +[Prometheus](https://prometheus.io/) is one of the most widely used monitoring tools for managing highly available services supported by [Cloud Native Computing Foundation](https://www.cncf.io/). By providing Prometheus metrics in Substrate, node operators can easily adopt widely used display/alert tools such +as [Grafana](https://grafana.com/) and [Alertmanager](https://prometheus.io/docs/alerting/alertmanager/). Easy access to such monitoring tools will benefit parachain developers/operators and validators to have much higher availability of their services. + +Metrics will be served under `/metrics` on TCP port 9615 by default. + +## Quick Start + +1. From the root of the repository start Substrate `cargo run --release`. + +2. In another terminal run `curl localhost:9615/metrics` to retrieve the metrics. + +To learn how to configure Prometheus see the Prometheus [Getting Started](https://prometheus.io/docs/prometheus/latest/getting_started/) guide. \ No newline at end of file diff --git a/utils/prometheus/src/lib.rs b/utils/prometheus/src/lib.rs new file mode 100644 index 0000000000000000000000000000000000000000..54b9183bc63929859631e5aba7a02e87ac026e8a --- /dev/null +++ b/utils/prometheus/src/lib.rs @@ -0,0 +1,144 @@ +// Copyright 2019 Parity Technologies (UK) Ltd. +// This file is part of Substrate. + +// Substrate is free software: you can redistribute it and/or modify +// it under the terms of the GNU General Public License as published by +// the Free Software Foundation, either version 3 of the License, or +// (at your option) any later version. + +// Substrate is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU General Public License for more details. + +// You should have received a copy of the GNU General Public License +// along with Substrate. If not, see . 
+ +use futures_util::{FutureExt, future::Future}; +pub use prometheus::{ + Registry, Error as PrometheusError, Opts, + core::{ + GenericGauge as Gauge, GenericCounter as Counter, + GenericGaugeVec as GaugeVec, GenericCounterVec as CounterVec, + AtomicF64 as F64, AtomicI64 as I64, AtomicU64 as U64, + } +}; +use prometheus::{Encoder, TextEncoder, core::Collector}; +use std::net::SocketAddr; + +#[cfg(not(target_os = "unknown"))] +mod networking; + +#[cfg(target_os = "unknown")] +pub use unknown_os::init_prometheus; +#[cfg(not(target_os = "unknown"))] +pub use known_os::init_prometheus; + +pub fn register<T: Clone + Collector + 'static>(metric: T, registry: &Registry) -> Result<T, PrometheusError> { + registry.register(Box::new(metric.clone()))?; + Ok(metric) +} + +// On WASM `init_prometheus` becomes a no-op. +#[cfg(target_os = "unknown")] +mod unknown_os { + use super::*; + + pub enum Error {} + + pub async fn init_prometheus(_: SocketAddr, _registry: Registry) -> Result<(), Error> { + Ok(()) + } +} + +#[cfg(not(target_os = "unknown"))] +mod known_os { + use super::*; + use hyper::http::StatusCode; + use hyper::{Server, Body, Request, Response, service::{service_fn, make_service_fn}}; + + #[derive(Debug, derive_more::Display, derive_more::From)] + pub enum Error { + /// Hyper internal error. + Hyper(hyper::Error), + /// Http request error. + Http(hyper::http::Error), + /// i/o error. + Io(std::io::Error), + #[display(fmt = "Prometheus port {} already in use.", _0)] + PortInUse(SocketAddr) + } + + impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Hyper(error) => Some(error), + Error::Http(error) => Some(error), + Error::Io(error) => Some(error), + Error::PortInUse(_) => None + } + } + } + + async fn request_metrics(req: Request<Body>, registry: Registry) -> Result<Response<Body>, Error> { + if req.uri().path() == "/metrics" { + let metric_families = registry.gather(); + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + encoder.encode(&metric_families, &mut buffer).unwrap(); + + Response::builder().status(StatusCode::OK) + .header("Content-Type", encoder.format_type()) + .body(Body::from(buffer)) + .map_err(Error::Http) + } else { + Response::builder().status(StatusCode::NOT_FOUND) + .body(Body::from("Not found.")) + .map_err(Error::Http) + } + + } + + #[derive(Clone)] + pub struct Executor; + + impl<T> hyper::rt::Executor<T> for Executor + where + T: Future + Send + 'static, + T::Output: Send + 'static, + { + fn execute(&self, future: T) { + async_std::task::spawn(future); + } + } + + /// Initializes the metrics context, and starts an HTTP server + /// to serve metrics.
+ pub async fn init_prometheus(prometheus_addr: SocketAddr, registry: Registry) -> Result<(), Error>{ + use networking::Incoming; + let listener = async_std::net::TcpListener::bind(&prometheus_addr) + .await + .map_err(|_| Error::PortInUse(prometheus_addr))?; + + log::info!("Prometheus server started at {}", prometheus_addr); + + let service = make_service_fn(move |_| { + let registry = registry.clone(); + + async move { + Ok::<_, hyper::Error>(service_fn(move |req: Request| { + request_metrics(req, registry.clone()) + })) + } + }); + + let server = Server::builder(Incoming(listener.incoming())) + .executor(Executor) + .serve(service) + .boxed(); + + let result = server.await.map_err(Into::into); + + result + } +} diff --git a/utils/grafana-data-source/src/networking.rs b/utils/prometheus/src/networking.rs similarity index 100% rename from utils/grafana-data-source/src/networking.rs rename to utils/prometheus/src/networking.rs diff --git a/utils/wasm-builder-runner/Cargo.toml b/utils/wasm-builder-runner/Cargo.toml index 1380d64fb30055e6f3cb4ac1073560e21a32552d..8a41fe98b2728efee0a0e71c946853f4adde7be2 100644 --- a/utils/wasm-builder-runner/Cargo.toml +++ b/utils/wasm-builder-runner/Cargo.toml @@ -5,7 +5,8 @@ authors = ["Parity Technologies "] description = "Runner for substrate-wasm-builder" edition = "2018" readme = "README.md" -repository = "https://github.com/paritytech/substrate" +repository = "https://github.com/paritytech/substrate/" license = "GPL-3.0" +homepage = "https://substrate.dev" [dependencies] diff --git a/utils/wasm-builder/Cargo.toml b/utils/wasm-builder/Cargo.toml index 5941a39ffb6a6281bac743258222d09c3da5ed35..1aac8913939b1253e66ad50e53dd18d3d1177cd8 100644 --- a/utils/wasm-builder/Cargo.toml +++ b/utils/wasm-builder/Cargo.toml @@ -5,8 +5,9 @@ authors = ["Parity Technologies "] description = "Utility for building WASM binaries" edition = "2018" readme = "README.md" -repository = "https://github.com/paritytech/substrate" +repository = "https://github.com/paritytech/substrate/" license = "GPL-3.0" +homepage = "https://substrate.dev" [dependencies] build-helper = "0.1.1" @@ -17,3 +18,4 @@ walkdir = "2.2.9" fs2 = "0.4.3" wasm-gc-api = "0.1.11" atty = "0.2.13" +itertools = "0.8.2" diff --git a/utils/wasm-builder/README.md b/utils/wasm-builder/README.md index 2fd9a6ab4cf80c9886a687474eacafbd2d6d5bd7..5f4ca615d5065dca62e1d9c7d0463975c52acb2c 100644 --- a/utils/wasm-builder/README.md +++ b/utils/wasm-builder/README.md @@ -50,6 +50,8 @@ By using environment variables, you can configure which WASM binaries are built - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. - `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. The path needs to be absolute. +- `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the wasm binaries. The + format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. Where `PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `node-runtime` will @@ -61,5 +63,8 @@ WASM builder requires the following prerequisites for building the WASM binary: - rust nightly + `wasm32-unknown-unknown` toolchain +If a specific rust nightly is installed with `rustup`, it is important that the wasm target is installed +as well. 
For example if installing the rust nightly from 20.02.2020 using `rustup install nightly-2020-02-20`, +the wasm target needs to be installed as well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. License: GPL-3.0 diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index 8500eba4a01f45146ef8a7c74143544bb369333d..195527a122725129e0a23058fe675dcc72e4320a 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -66,6 +66,8 @@ //! - `WASM_BUILD_NO_COLOR` - Disable color output of the wasm build. //! - `WASM_TARGET_DIRECTORY` - Will copy any build wasm binary to the given directory. The path needs //! to be absolute. +//! - `WASM_BUILD_TOOLCHAIN` - The toolchain that should be used to build the wasm binaries. The +//! format needs to be the same as used by cargo, e.g. `nightly-2020-02-20`. //! //! Each project can be skipped individually by using the environment variable `SKIP_PROJECT_NAME_WASM_BUILD`. //! Where `PROJECT_NAME` needs to be replaced by the name of the cargo project, e.g. `node-runtime` will @@ -77,8 +79,11 @@ //! //! - rust nightly + `wasm32-unknown-unknown` toolchain //! +//! If a specific rust nightly is installed with `rustup`, it is important that the wasm target is installed +//! as well. For example if installing the rust nightly from 20.02.2020 using `rustup install nightly-2020-02-20`, +//! the wasm target needs to be installed as well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. -use std::{env, fs, path::PathBuf, process::{Command, Stdio, self}}; +use std::{env, fs, path::PathBuf, process::{Command, self}, io::BufRead}; mod prerequisites; mod wasm_project; @@ -103,6 +108,9 @@ const WASM_TARGET_DIRECTORY: &str = "WASM_TARGET_DIRECTORY"; /// Environment variable to disable color output of the wasm build. const WASM_BUILD_NO_COLOR: &str = "WASM_BUILD_NO_COLOR"; +/// Environment variable to set the toolchain used to compile the wasm binary. +const WASM_BUILD_TOOLCHAIN: &str = "WASM_BUILD_TOOLCHAIN"; + /// Build the currently built project as wasm binary. /// /// The current project is determined by using the `CARGO_MANIFEST_DIR` environment variable. @@ -178,19 +186,56 @@ fn write_file_if_changed(file: PathBuf, content: String) { /// Get a cargo command that compiles with nightly fn get_nightly_cargo() -> CargoCommand { + let env_cargo = CargoCommand::new( + &env::var("CARGO").expect("`CARGO` env variable is always set by cargo"), + ); let default_cargo = CargoCommand::new("cargo"); - let mut rustup_run_nightly = CargoCommand::new("rustup"); - rustup_run_nightly.args(&["run", "nightly", "cargo"]); + let rustup_run_nightly = CargoCommand::new_with_args("rustup", &["run", "nightly", "cargo"]); + let wasm_toolchain = env::var(WASM_BUILD_TOOLCHAIN).ok(); - if default_cargo.is_nightly() { + // First check if the user requested a specific toolchain + if let Some(cmd) = wasm_toolchain.and_then(|t| get_rustup_nightly(Some(t))) { + cmd + } else if env_cargo.is_nightly() { + env_cargo + } else if default_cargo.is_nightly() { default_cargo - } else if rustup_run_nightly.works() { + } else if rustup_run_nightly.is_nightly() { rustup_run_nightly } else { - default_cargo + // If no command before provided us with a nightly compiler, we try to search one + // with rustup. If that fails as well, we return the default cargo and let the prequisities + // check fail. + get_rustup_nightly(None).unwrap_or(default_cargo) } } +/// Get a nightly from rustup. 
If `selected` is `Some(_)`, a `CargoCommand` using the given +/// nightly is returned. +fn get_rustup_nightly(selected: Option<String>) -> Option<CargoCommand> { + let host = format!("-{}", env::var("HOST").expect("`HOST` is always set by cargo")); + + let version = match selected { + Some(selected) => selected, + None => { + let output = Command::new("rustup").args(&["toolchain", "list"]).output().ok()?.stdout; + let lines = output.as_slice().lines(); + + let mut latest_nightly = None; + for line in lines.filter_map(|l| l.ok()) { + if line.starts_with("nightly-") && line.ends_with(&host) { + // Rustup prints them sorted + latest_nightly = Some(line.clone()); + } + } + + latest_nightly?.trim_end_matches(&host).into() + } + }; + + Some(CargoCommand::new_with_args("rustup", &["run", &version, "cargo"])) +} + /// Builder for cargo commands #[derive(Debug)] struct CargoCommand { @@ -203,14 +248,11 @@ impl CargoCommand { CargoCommand { program: program.into(), args: Vec::new() } } - fn arg(&mut self, arg: &str) -> &mut Self { - self.args.push(arg.into()); - self - } - - fn args(&mut self, args: &[&str]) -> &mut Self { - args.into_iter().for_each(|a| { self.arg(a); }); - self + fn new_with_args(program: &str, args: &[&str]) -> Self { + CargoCommand { + program: program.into(), + args: args.iter().map(ToString::to_string).collect(), + } } fn command(&self) -> Command { @@ -219,14 +261,6 @@ impl CargoCommand { cmd } - fn works(&self) -> bool { - self.command() - .stdout(Stdio::null()) - .stderr(Stdio::null()) - .status() - .map(|s| s.success()).unwrap_or(false) - } - /// Check if the supplied cargo command is a nightly version fn is_nightly(&self) -> bool { // `RUSTC_BOOTSTRAP` tells a stable compiler to behave like a nightly. So, when this env diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 60be4684ba893c34527faba2cb889478354d2840..1e6d7fa463d5342e5df788354a76741ef37ede02 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -28,6 +28,8 @@ use walkdir::WalkDir; use fs2::FileExt; +use itertools::Itertools; + /// Holds the path to the bloaty WASM binary.
pub struct WasmBinaryBloaty(PathBuf); @@ -87,8 +89,13 @@ pub fn create_and_compile( // Lock the workspace exclusively for us let _lock = WorkspaceLock::new(&wasm_workspace_root); - let project = create_project(cargo_manifest, &wasm_workspace); - create_wasm_workspace_project(&wasm_workspace, cargo_manifest); + let crate_metadata = MetadataCommand::new() + .manifest_path(cargo_manifest) + .exec() + .expect("`cargo metadata` can not fail on project `Cargo.toml`; qed"); + + let project = create_project(cargo_manifest, &wasm_workspace, &crate_metadata); + create_wasm_workspace_project(&wasm_workspace, &crate_metadata.workspace_root); build_project(&project, default_rustflags); let (wasm_binary, bloaty) = compact_wasm_file( @@ -232,15 +239,9 @@ fn find_and_clear_workspace_members(wasm_workspace: &Path) -> Vec<String> { members } -fn create_wasm_workspace_project(wasm_workspace: &Path, cargo_manifest: &Path) { +fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Path) { let members = find_and_clear_workspace_members(wasm_workspace); - let crate_metadata = MetadataCommand::new() - .manifest_path(cargo_manifest) - .exec() - .expect("`cargo metadata` can not fail on project `Cargo.toml`; qed"); - let workspace_root_path = crate_metadata.workspace_root; - let mut workspace_toml: Table = toml::from_str( &fs::read_to_string( workspace_root_path.join("Cargo.toml"), @@ -281,8 +282,10 @@ fn create_wasm_workspace_project(wasm_workspace: &Path, cargo_manifest: &Path) { p.iter_mut() .filter(|(k, _)| k == &"path") .for_each(|(_, v)| { - if let Some(path) = v.as_str() { - *v = workspace_root_path.join(path).display().to_string().into(); + if let Some(path) = v.as_str().map(PathBuf::from) { + if path.is_relative() { + *v = workspace_root_path.join(path).display().to_string().into(); + } } }) ); @@ -296,11 +299,45 @@ fn create_wasm_workspace_project(wasm_workspace: &Path, cargo_manifest: &Path) { ).expect("WASM workspace `Cargo.toml` writing can not fail; qed"); } +/// Get a list of enabled features for the project. +fn project_enabled_features( + cargo_manifest: &Path, + crate_metadata: &cargo_metadata::Metadata, +) -> Vec<String> { + let package = crate_metadata.packages + .iter() + .find(|p| p.manifest_path == cargo_manifest) + .expect("Wasm project exists in its own metadata; qed"); + + let mut enabled_features = package.features.keys() + .filter(|f| { + let mut feature_env = f.replace("-", "_"); + feature_env.make_ascii_uppercase(); + + // We don't want to enable the `std`/`default` feature for the wasm build and + // we need to check if the feature is enabled by checking the env variable. + *f != "std" + && *f != "default" + && env::var(format!("CARGO_FEATURE_{}", feature_env)) + .map(|v| v == "1") + .unwrap_or_default() + }) + .cloned() + .collect::<Vec<_>>(); + + enabled_features.sort(); + enabled_features +} + /// Create the project used to build the wasm binary. /// /// # Returns /// The path to the created project.
-fn create_project(cargo_manifest: &Path, wasm_workspace: &Path) -> PathBuf { +fn create_project( + cargo_manifest: &Path, + wasm_workspace: &Path, + crate_metadata: &cargo_metadata::Metadata, +) -> PathBuf { let crate_name = get_crate_name(cargo_manifest); let crate_path = cargo_manifest.parent().expect("Parent path exists; qed"); let wasm_binary = get_wasm_binary_name(cargo_manifest); @@ -308,6 +345,8 @@ fn create_project(cargo_manifest: &Path, wasm_workspace: &Path) -> PathBuf { fs::create_dir_all(project_folder.join("src")).expect("Wasm project dir create can not fail; qed"); + let enabled_features = project_enabled_features(&cargo_manifest, &crate_metadata); + write_file_if_changed( project_folder.join("Cargo.toml"), format!( @@ -322,11 +361,12 @@ fn create_project(cargo_manifest: &Path, wasm_workspace: &Path) -> PathBuf { crate-type = ["cdylib"] [dependencies] - wasm_project = {{ package = "{crate_name}", path = "{crate_path}", default-features = false }} + wasm_project = {{ package = "{crate_name}", path = "{crate_path}", default-features = false, features = [ {features} ] }} "#, crate_name = crate_name, crate_path = crate_path.display(), wasm_binary = wasm_binary, + features = enabled_features.into_iter().map(|f| format!("\"{}\"", f)).join(","), ) ); @@ -453,6 +493,7 @@ fn generate_rerun_if_changed_instructions( println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_TYPE_ENV); println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_RUSTFLAGS_ENV); println!("cargo:rerun-if-env-changed={}", crate::WASM_TARGET_DIRECTORY); + println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_TOOLCHAIN); } /// Copy the WASM binary to the target directory set in `WASM_TARGET_DIRECTORY` environment variable.
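The `project_enabled_features` logic added above decides which cargo features are forwarded into the generated wasm project's `Cargo.toml`. As a simplified, self-contained sketch of that rule (not the builder code itself), the snippet below reproduces the environment-variable check with a hypothetical feature list standing in for the `cargo metadata` package data.

```rust
use std::env;

/// Simplified illustration of the feature-forwarding rule: inside a build script,
/// cargo exposes every enabled feature as `CARGO_FEATURE_<NAME>` (dashes turned
/// into underscores, upper-cased), and only those features are passed on.
fn enabled_features(all_features: &[&str]) -> Vec<String> {
    let mut enabled: Vec<String> = all_features
        .iter()
        .copied()
        .filter(|f| {
            let feature_env = f.replace("-", "_").to_ascii_uppercase();
            // `std`/`default` are never forwarded to the wasm build; everything
            // else is forwarded only if cargo reports it as enabled.
            *f != "std"
                && *f != "default"
                && env::var(format!("CARGO_FEATURE_{}", feature_env))
                    .map(|v| v == "1")
                    .unwrap_or_default()
        })
        .map(ToString::to_string)
        .collect();
    enabled.sort();
    enabled
}

fn main() {
    // Hypothetical feature list standing in for `package.features.keys()`.
    let features = enabled_features(&["std", "default", "runtime-benchmarks"]);
    // Quote and comma-join the names, mirroring the `features = [ ... ]` line
    // written into the generated wasm project's `Cargo.toml`.
    let joined = features
        .iter()
        .map(|f| format!("\"{}\"", f))
        .collect::<Vec<_>>()
        .join(",");
    println!("features = [ {} ]", joined);
}
```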